
[RFC,12/14] mm: Free folios directly in move_folios_to_lru()

Message ID 20230825135918.4164671-13-willy@infradead.org (mailing list archive)
State New
Series Rearrange batched folio freeing

Commit Message

Matthew Wilcox Aug. 25, 2023, 1:59 p.m. UTC
The few folios which can't be moved to the LRU list (because their
refcount dropped to zero) used to be returned to the caller to dispose
of.  Make this simpler to call by freeing the folios directly through
free_unref_folios().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/vmscan.c | 31 ++++++++++++-------------------
 1 file changed, 12 insertions(+), 19 deletions(-)
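
The caller-visible effect is that @list no longer comes back holding folios to
dispose of.  A minimal before/after sketch of the calling convention, loosely
based on the shrink_inactive_list() hunk below (simplified; lock acquisition,
lru_note_cost() and the statistics updates are omitted):

	/* Before: leftover folios were handed back to the caller on the list. */
	move_folios_to_lru(lruvec, &folio_list);
	spin_unlock_irq(&lruvec->lru_lock);
	mem_cgroup_uncharge_list(&folio_list);
	free_unref_page_list(&folio_list);

	/*
	 * After: move_folios_to_lru() frees them internally in folio_batch
	 * sized chunks via free_unref_folios(), so the caller only drops
	 * the lock.
	 */
	move_folios_to_lru(lruvec, &folio_list);
	spin_unlock_irq(&lruvec->lru_lock);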

Comments

Ryan Roberts Aug. 31, 2023, 3:46 p.m. UTC | #1
On 25/08/2023 14:59, Matthew Wilcox (Oracle) wrote:
> The few folios which can't be moved to the LRU list (because their
> refcount dropped to zero) used to be returned to the caller to dispose
> of.  Make this simpler to call by freeing the folios directly through
> free_unref_folios().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/vmscan.c | 31 ++++++++++++-------------------
>  1 file changed, 12 insertions(+), 19 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 965c429847fd..d5080510608e 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2489,8 +2489,9 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
>  		struct list_head *list)

The comment for this function is now stale:

/*
 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
 * On return, @list is reused as a list of folios to be freed by the caller.
 *
 * Returns the number of pages moved to the given lruvec.
 */

I think the "On return" bit is no longer true.


>  {
>  	int nr_pages, nr_moved = 0;
> -	LIST_HEAD(folios_to_free);
> +	struct folio_batch free_folios;
>  
> +	folio_batch_init(&free_folios);
>  	while (!list_empty(list)) {
>  		struct folio *folio = lru_to_folio(list);
>  
> @@ -2519,12 +2520,12 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
>  		if (unlikely(folio_put_testzero(folio))) {
>  			__folio_clear_lru_flags(folio);
>  
> -			if (unlikely(folio_test_large(folio))) {
> +			if (folio_batch_add(&free_folios, folio) == 0) {
>  				spin_unlock_irq(&lruvec->lru_lock);
> -				destroy_large_folio(folio);
> +				mem_cgroup_uncharge_folios(&free_folios);
> +				free_unref_folios(&free_folios);
>  				spin_lock_irq(&lruvec->lru_lock);
> -			} else
> -				list_add(&folio->lru, &folios_to_free);
> +			}
>  
>  			continue;
>  		}
> @@ -2541,10 +2542,12 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
>  			workingset_age_nonresident(lruvec, nr_pages);
>  	}
>  
> -	/*
> -	 * To save our caller's stack, now use input list for pages to free.
> -	 */
> -	list_splice(&folios_to_free, list);
> +	if (free_folios.nr) {
> +		spin_unlock_irq(&lruvec->lru_lock);
> +		mem_cgroup_uncharge_folios(&free_folios);
> +		free_unref_folios(&free_folios);
> +		spin_lock_irq(&lruvec->lru_lock);
> +	}
>  
>  	return nr_moved;
>  }
> @@ -2623,8 +2626,6 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
>  	spin_unlock_irq(&lruvec->lru_lock);
>  
>  	lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
> -	mem_cgroup_uncharge_list(&folio_list);
> -	free_unref_page_list(&folio_list);
>  
>  	/*
>  	 * If dirty folios are scanned that are not queued for IO, it
> @@ -2765,8 +2766,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  
>  	nr_activate = move_folios_to_lru(lruvec, &l_active);
>  	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
> -	/* Keep all free folios in l_active list */
> -	list_splice(&l_inactive, &l_active);
>  
>  	__count_vm_events(PGDEACTIVATE, nr_deactivate);
>  	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
> @@ -2776,8 +2775,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  
>  	if (nr_rotated)
>  		lru_note_cost(lruvec, file, 0, nr_rotated);
> -	mem_cgroup_uncharge_list(&l_active);
> -	free_unref_page_list(&l_active);
>  	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
>  			nr_deactivate, nr_rotated, sc->priority, file);
>  }
> @@ -5238,10 +5235,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
>  
>  	spin_unlock_irq(&lruvec->lru_lock);
>  
> -	mem_cgroup_uncharge_list(&list);
> -	free_unref_page_list(&list);
> -
> -	INIT_LIST_HEAD(&list);
>  	list_splice_init(&clean, &list);
>  
>  	if (!list_empty(&list)) {
Matthew Wilcox Sept. 1, 2023, 4:16 a.m. UTC | #2
On Thu, Aug 31, 2023 at 04:46:50PM +0100, Ryan Roberts wrote:
> On 25/08/2023 14:59, Matthew Wilcox (Oracle) wrote:
> > The few folios which can't be moved to the LRU list (because their
> > refcount dropped to zero) used to be returned to the caller to dispose
> > of.  Make this simpler to call by freeing the folios directly through
> > free_unref_folios().
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> > ---
> >  mm/vmscan.c | 31 ++++++++++++-------------------
> >  1 file changed, 12 insertions(+), 19 deletions(-)
> > 
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 965c429847fd..d5080510608e 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -2489,8 +2489,9 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
> >  		struct list_head *list)
> 
> The comment for this function is now stale:
> 
> /*
>  * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
>  * On return, @list is reused as a list of folios to be freed by the caller.
>  *
>  * Returns the number of pages moved to the given lruvec.
>  */
> 
> I think the "On return" bit is no longer true.

It's still true, but misleading ;-)  I'll amend it to

- * On return, @list is reused as a list of folios to be freed by the caller.
+ * On return, @list is empty.
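
i.e. the comment as a whole would read:

/*
 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
 * On return, @list is empty.
 *
 * Returns the number of pages moved to the given lruvec.
 */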

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 965c429847fd..d5080510608e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2489,8 +2489,9 @@  static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		struct list_head *list)
 {
 	int nr_pages, nr_moved = 0;
-	LIST_HEAD(folios_to_free);
+	struct folio_batch free_folios;
 
+	folio_batch_init(&free_folios);
 	while (!list_empty(list)) {
 		struct folio *folio = lru_to_folio(list);
 
@@ -2519,12 +2520,12 @@  static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
 
-			if (unlikely(folio_test_large(folio))) {
+			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
-				destroy_large_folio(folio);
+				mem_cgroup_uncharge_folios(&free_folios);
+				free_unref_folios(&free_folios);
 				spin_lock_irq(&lruvec->lru_lock);
-			} else
-				list_add(&folio->lru, &folios_to_free);
+			}
 
 			continue;
 		}
@@ -2541,10 +2542,12 @@  static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 			workingset_age_nonresident(lruvec, nr_pages);
 	}
 
-	/*
-	 * To save our caller's stack, now use input list for pages to free.
-	 */
-	list_splice(&folios_to_free, list);
+	if (free_folios.nr) {
+		spin_unlock_irq(&lruvec->lru_lock);
+		mem_cgroup_uncharge_folios(&free_folios);
+		free_unref_folios(&free_folios);
+		spin_lock_irq(&lruvec->lru_lock);
+	}
 
 	return nr_moved;
 }
@@ -2623,8 +2626,6 @@  static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&lruvec->lru_lock);
 
 	lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
-	mem_cgroup_uncharge_list(&folio_list);
-	free_unref_page_list(&folio_list);
 
 	/*
 	 * If dirty folios are scanned that are not queued for IO, it
@@ -2765,8 +2766,6 @@  static void shrink_active_list(unsigned long nr_to_scan,
 
 	nr_activate = move_folios_to_lru(lruvec, &l_active);
 	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
-	/* Keep all free folios in l_active list */
-	list_splice(&l_inactive, &l_active);
 
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
@@ -2776,8 +2775,6 @@  static void shrink_active_list(unsigned long nr_to_scan,
 
 	if (nr_rotated)
 		lru_note_cost(lruvec, file, 0, nr_rotated);
-	mem_cgroup_uncharge_list(&l_active);
-	free_unref_page_list(&l_active);
 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
 			nr_deactivate, nr_rotated, sc->priority, file);
 }
@@ -5238,10 +5235,6 @@  static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 
 	spin_unlock_irq(&lruvec->lru_lock);
 
-	mem_cgroup_uncharge_list(&list);
-	free_unref_page_list(&list);
-
-	INIT_LIST_HEAD(&list);
 	list_splice_init(&clean, &list);
 
 	if (!list_empty(&list)) {