diff mbox series

[RFC,17/18] mm: Convert free_swap_cache() to take a folio

Message ID 20230830185041.3427464-3-willy@infradead.org (mailing list archive)
State New
Headers show
Series Rearrange batched folio freeing | expand

Commit Message

Matthew Wilcox Aug. 30, 2023, 6:50 p.m. UTC
All but one caller of free_swap_cache() already has a folio, so convert
free_swap_cache() to take a folio and remove its internal call to
page_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h |  8 ++++----
 mm/khugepaged.c      |  2 +-
 mm/memory.c          |  2 +-
 mm/swap_state.c      | 12 ++++++------
 4 files changed, 12 insertions(+), 12 deletions(-)

Comments

Ryan Roberts Aug. 31, 2023, 6:49 p.m. UTC | #1
On 30/08/2023 19:50, Matthew Wilcox (Oracle) wrote:
> All but one caller already has a folio, so convert
> free_page_and_swap_cache() to have a folio and remove the call to
> page_folio().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  include/linux/swap.h |  8 ++++----
>  mm/khugepaged.c      |  2 +-
>  mm/memory.c          |  2 +-
>  mm/swap_state.c      | 12 ++++++------
>  4 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 493487ed7c38..3536595e3bda 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -449,9 +449,9 @@ static inline unsigned long total_swapcache_pages(void)
>  	return global_node_page_state(NR_SWAPCACHE);
>  }
>  
> -extern void free_swap_cache(struct page *page);
> -extern void free_page_and_swap_cache(struct page *);
> -extern void free_pages_and_swap_cache(struct encoded_page **, int);
> +void free_swap_cache(struct folio *folio);
> +void free_page_and_swap_cache(struct page *);
> +void free_pages_and_swap_cache(struct encoded_page **, int);
>  /* linux/mm/swapfile.c */
>  extern atomic_long_t nr_swap_pages;
>  extern long total_swap_pages;
> @@ -534,7 +534,7 @@ static inline void put_swap_device(struct swap_info_struct *si)
>  /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
>  #define free_swap_and_cache(e) is_pfn_swap_entry(e)
>  
> -static inline void free_swap_cache(struct page *page)
> +static inline void free_swap_cache(struct folio *folio)
>  {
>  }
>  
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index afc94c281035..7b83bb6a1199 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -725,7 +725,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
>  		node_stat_sub_folio(src, NR_ISOLATED_ANON +
>  				folio_is_file_lru(src));
>  		folio_unlock(src);
> -		free_swap_cache(&src->page);
> +		free_swap_cache(src);
>  		folio_putback_lru(src);
>  	}
>  }
> diff --git a/mm/memory.c b/mm/memory.c
> index e35328c2f76e..2611d0fa4465 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3194,7 +3194,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
>  		folio_put(new_folio);
>  	if (old_folio) {
>  		if (page_copied)
> -			free_swap_cache(&old_folio->page);
> +			free_swap_cache(old_folio);
>  		folio_put(old_folio);
>  	}
>  
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index f68ddeb93698..e6b4a00d3655 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -282,10 +282,8 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
>   * folio_free_swap() _with_ the lock.
>   * 					- Marcelo
>   */
> -void free_swap_cache(struct page *page)
> +void free_swap_cache(struct folio *folio)
>  {
> -	struct folio *folio = page_folio(page);
> -
>  	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
>  	    folio_trylock(folio)) {
>  		folio_free_swap(folio);
> @@ -299,9 +297,11 @@ void free_swap_cache(struct page *page)
>   */
>  void free_page_and_swap_cache(struct page *page)
>  {
> -	free_swap_cache(page);
> +	struct folio *folio = page_folio(page);
> +
> +	free_swap_cache(folio);
>  	if (!is_huge_zero_page(page))
> -		put_page(page);
> +		folio_put(folio);
>  }
>  
>  /*
> @@ -316,7 +316,7 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
>  	folio_batch_init(&folios);
>  	for (int i = 0; i < nr; i++) {
>  		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
> -		free_swap_cache(&folio->page);
> +		free_swap_cache(folio);
>  		if (folio_batch_add(&folios, folio) == 0)
>  			folios_put(&folios);
>  	}
diff mbox series

Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 493487ed7c38..3536595e3bda 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -449,9 +449,9 @@  static inline unsigned long total_swapcache_pages(void)
 	return global_node_page_state(NR_SWAPCACHE);
 }
 
-extern void free_swap_cache(struct page *page);
-extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct encoded_page **, int);
+void free_swap_cache(struct folio *folio);
+void free_page_and_swap_cache(struct page *);
+void free_pages_and_swap_cache(struct encoded_page **, int);
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
@@ -534,7 +534,7 @@  static inline void put_swap_device(struct swap_info_struct *si)
 /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
 #define free_swap_and_cache(e) is_pfn_swap_entry(e)
 
-static inline void free_swap_cache(struct page *page)
+static inline void free_swap_cache(struct folio *folio)
 {
 }
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index afc94c281035..7b83bb6a1199 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -725,7 +725,7 @@  static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 		node_stat_sub_folio(src, NR_ISOLATED_ANON +
 				folio_is_file_lru(src));
 		folio_unlock(src);
-		free_swap_cache(&src->page);
+		free_swap_cache(src);
 		folio_putback_lru(src);
 	}
 }
diff --git a/mm/memory.c b/mm/memory.c
index e35328c2f76e..2611d0fa4465 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3194,7 +3194,7 @@  static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		folio_put(new_folio);
 	if (old_folio) {
 		if (page_copied)
-			free_swap_cache(&old_folio->page);
+			free_swap_cache(old_folio);
 		folio_put(old_folio);
 	}
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f68ddeb93698..e6b4a00d3655 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -282,10 +282,8 @@  void clear_shadow_from_swap_cache(int type, unsigned long begin,
  * folio_free_swap() _with_ the lock.
  * 					- Marcelo
  */
-void free_swap_cache(struct page *page)
+void free_swap_cache(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
 	    folio_trylock(folio)) {
 		folio_free_swap(folio);
@@ -299,9 +297,11 @@  void free_swap_cache(struct page *page)
  */
 void free_page_and_swap_cache(struct page *page)
 {
-	free_swap_cache(page);
+	struct folio *folio = page_folio(page);
+
+	free_swap_cache(folio);
 	if (!is_huge_zero_page(page))
-		put_page(page);
+		folio_put(folio);
 }
 
 /*
@@ -316,7 +316,7 @@  void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
 	folio_batch_init(&folios);
 	for (int i = 0; i < nr; i++) {
 		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
-		free_swap_cache(&folio->page);
+		free_swap_cache(folio);
 		if (folio_batch_add(&folios, folio) == 0)
 			folios_put(&folios);
 	}