diff mbox series

[2/7] mm, swap: drop the flag TTRS_DIRECT

Message ID 20250214175709.76029-3-ryncsn@gmail.com (mailing list archive)
State New
Headers show
Series mm, swap: remove swap slot cache | expand

Commit Message

Kairui Song Feb. 14, 2025, 5:57 p.m. UTC
From: Kairui Song <kasong@tencent.com>

This flag exists temporarily to allow the allocator to bypass the slot
cache during freeing, so reclaiming one slot will free the slot
immediately.

But we have already removed slot cache usage on freeing, so this
flag no longer has any effect.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/swapfile.c | 23 +++--------------------
 1 file changed, 3 insertions(+), 20 deletions(-)

Comments

Baoquan He Feb. 19, 2025, 2:42 a.m. UTC | #1
On 02/15/25 at 01:57am, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
> 
> This flag exists temporarily to allow the allocator to bypass the slot
> cache during freeing, so reclaiming one slot will free the slot
> immediately.
> 
> But now we have already removed slot cache usage on freeing, so this
> flag has no effect now.
> 
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
>  mm/swapfile.c | 23 +++--------------------
>  1 file changed, 3 insertions(+), 20 deletions(-)
> 
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index c77ffee4af86..449e388a6fec 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c

Reviewed-by: Baoquan He <bhe@redhat.com>

> @@ -158,8 +158,6 @@ static long swap_usage_in_pages(struct swap_info_struct *si)
>  #define TTRS_UNMAPPED		0x2
>  /* Reclaim the swap entry if swap is getting full */
>  #define TTRS_FULL		0x4
> -/* Reclaim directly, bypass the slot cache and don't touch device lock */
> -#define TTRS_DIRECT		0x8
>  
>  static bool swap_only_has_cache(struct swap_info_struct *si,
>  			      unsigned long offset, int nr_pages)
> @@ -257,23 +255,8 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
>  	if (!need_reclaim)
>  		goto out_unlock;
>  
> -	if (!(flags & TTRS_DIRECT)) {
> -		/* Free through slot cache */
> -		delete_from_swap_cache(folio);
> -		folio_set_dirty(folio);
> -		ret = nr_pages;
> -		goto out_unlock;
> -	}
> -
> -	xa_lock_irq(&address_space->i_pages);
> -	__delete_from_swap_cache(folio, entry, NULL);
> -	xa_unlock_irq(&address_space->i_pages);
> -	folio_ref_sub(folio, nr_pages);
> +	delete_from_swap_cache(folio);
>  	folio_set_dirty(folio);
> -
> -	ci = lock_cluster(si, offset);
> -	swap_entry_range_free(si, ci, entry, nr_pages);
> -	unlock_cluster(ci);
>  	ret = nr_pages;
>  out_unlock:
>  	folio_unlock(folio);
> @@ -707,7 +690,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
>  			offset++;
>  			break;
>  		case SWAP_HAS_CACHE:
> -			nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
> +			nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
>  			if (nr_reclaim > 0)
>  				offset += nr_reclaim;
>  			else
> @@ -860,7 +843,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
>  			if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
>  				spin_unlock(&ci->lock);
>  				nr_reclaim = __try_to_reclaim_swap(si, offset,
> -								   TTRS_ANYWAY | TTRS_DIRECT);
> +								   TTRS_ANYWAY);
>  				spin_lock(&ci->lock);
>  				if (nr_reclaim) {
>  					offset += abs(nr_reclaim);
> -- 
> 2.48.1
>
diff mbox series

Patch

diff --git a/mm/swapfile.c b/mm/swapfile.c
index c77ffee4af86..449e388a6fec 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -158,8 +158,6 @@  static long swap_usage_in_pages(struct swap_info_struct *si)
 #define TTRS_UNMAPPED		0x2
 /* Reclaim the swap entry if swap is getting full */
 #define TTRS_FULL		0x4
-/* Reclaim directly, bypass the slot cache and don't touch device lock */
-#define TTRS_DIRECT		0x8
 
 static bool swap_only_has_cache(struct swap_info_struct *si,
 			      unsigned long offset, int nr_pages)
@@ -257,23 +255,8 @@  static int __try_to_reclaim_swap(struct swap_info_struct *si,
 	if (!need_reclaim)
 		goto out_unlock;
 
-	if (!(flags & TTRS_DIRECT)) {
-		/* Free through slot cache */
-		delete_from_swap_cache(folio);
-		folio_set_dirty(folio);
-		ret = nr_pages;
-		goto out_unlock;
-	}
-
-	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(folio, entry, NULL);
-	xa_unlock_irq(&address_space->i_pages);
-	folio_ref_sub(folio, nr_pages);
+	delete_from_swap_cache(folio);
 	folio_set_dirty(folio);
-
-	ci = lock_cluster(si, offset);
-	swap_entry_range_free(si, ci, entry, nr_pages);
-	unlock_cluster(ci);
 	ret = nr_pages;
 out_unlock:
 	folio_unlock(folio);
@@ -707,7 +690,7 @@  static bool cluster_reclaim_range(struct swap_info_struct *si,
 			offset++;
 			break;
 		case SWAP_HAS_CACHE:
-			nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
+			nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 			if (nr_reclaim > 0)
 				offset += nr_reclaim;
 			else
@@ -860,7 +843,7 @@  static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
 			if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
 				spin_unlock(&ci->lock);
 				nr_reclaim = __try_to_reclaim_swap(si, offset,
-								   TTRS_ANYWAY | TTRS_DIRECT);
+								   TTRS_ANYWAY);
 				spin_lock(&ci->lock);
 				if (nr_reclaim) {
 					offset += abs(nr_reclaim);