[RFC,v1,1/2] mm: swap: Remove CLUSTER_FLAG_HUGE from swap_cluster_info:flags

Message ID 20231010142111.3997780-2-ryan.roberts@arm.com (mailing list archive)
State New
Series Swap-out small-sized THP without splitting

Commit Message

Ryan Roberts Oct. 10, 2023, 2:21 p.m. UTC
As preparation for supporting small-sized THP in the swap-out path,
without first needing to split to order-0, remove CLUSTER_FLAG_HUGE.
When present, the flag always implies a PMD-sized THP, which is the
same size as a cluster.

The only use of the flag was to determine whether a swap entry refers to
a single page or a PMD-sized THP in swap_page_trans_huge_swapped().
Instead of relying on the flag, we now pass in nr_pages, which
originates from the folio's number of pages. This allows the logic to
work for folios of any order.
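
A condensed sketch of the reworked check (locking and the
no-cluster/order-0 fast path elided; the full version is in the
mm/swapfile.c hunk below), assuming, as the rest of the series does,
that a large folio's swap entries are naturally aligned:

    static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
                                             swp_entry_t entry,
                                             unsigned int nr_pages)
    {
            unsigned char *map = si->swap_map;
            /* Natural alignment means rounding down to nr_pages
             * finds the folio's first swap entry. */
            unsigned long offset = round_down(swp_offset(entry), nr_pages);
            int i;

            for (i = 0; i < nr_pages; i++)
                    if (swap_count(map[offset + i]))
                            return true;
            return false;
    }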

The one snag is that one of the swap_page_trans_huge_swapped() call
sites does not have the folio. But it was only being called there to
avoid bothering to call __try_to_reclaim_swap() in some cases.
__try_to_reclaim_swap() gets the folio and (via some other functions)
calls swap_page_trans_huge_swapped(). So I've removed the problematic
call site and believe the new logic should be equivalent.
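
Concretely, the extra check at the free_swap_and_cache() call site
just drops away (final mm/swapfile.c hunk below), so reclaim is
attempted whenever the cache reference is the last one:

            count = __swap_entry_free(p, entry);
            if (count == SWAP_HAS_CACHE)
                    __try_to_reclaim_swap(p, swp_offset(entry),
                                          TTRS_UNMAPPED | TTRS_FULL);

__try_to_reclaim_swap() then re-does the swapped check under the
folio lock, via folio_free_swap() and folio_swapped() if I trace the
call chain correctly.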

Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster(),
which used to be called during folio splitting, since
split_swap_cluster()'s only job was to remove the flag.
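
For reference, the core of the removed split_swap_cluster() (its full
body is in the mm/swapfile.c hunk below) was just:

            ci = lock_cluster(si, offset);
            cluster_clear_huge(ci);
            unlock_cluster(ci);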

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 include/linux/swap.h | 10 ----------
 mm/huge_memory.c     |  3 ---
 mm/swapfile.c        | 47 ++++++++------------------------------------
 3 files changed, 8 insertions(+), 52 deletions(-)

--
2.25.1

Comments

Huang, Ying Oct. 11, 2023, 7:43 a.m. UTC | #1
Ryan Roberts <ryan.roberts@arm.com> writes:

> As preparation for supporting small-sized THP in the swap-out path,
> without first needing to split to order-0, remove CLUSTER_FLAG_HUGE.
> When present, the flag always implies a PMD-sized THP, which is the
> same size as a cluster.
>
> The only use of the flag was to determine whether a swap entry refers to
> a single page or a PMD-sized THP in swap_page_trans_huge_swapped().
> Instead of relying on the flag, we now pass in nr_pages, which
> originates from the folio's number of pages. This allows the logic to
> work for folios of any order.
>
> The one snag is that one of the swap_page_trans_huge_swapped() call
> sites does not have the folio. But it was only being called there to
> avoid bothering to call __try_to_reclaim_swap() in some cases.
> __try_to_reclaim_swap() gets the folio and (via some other functions)
> calls swap_page_trans_huge_swapped(). So I've removed the problematic
> call site and believe the new logic should be equivalent.

I believe this should be OK.  It would be better to compare the performance too.

> Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster()
> which used to be called during folio splitting, since
> split_swap_cluster()'s only job was to remove the flag.
>

--
Best Regards,
Huang, Ying
Kefeng Wang Oct. 11, 2023, 8:17 a.m. UTC | #2
On 2023/10/10 22:21, Ryan Roberts wrote:
> As preparation for supporting small-sized THP in the swap-out path,
> without first needing to split to order-0, remove CLUSTER_FLAG_HUGE.
> When present, the flag always implies a PMD-sized THP, which is the
> same size as a cluster.
> 
> The only use of the flag was to determine whether a swap entry refers to
> a single page or a PMD-sized THP in swap_page_trans_huge_swapped().
> Instead of relying on the flag, we now pass in nr_pages, which
> originates from the folio's number of pages. This allows the logic to
> work for folios of any order.
> 
> The one snag is that one of the swap_page_trans_huge_swapped() call
> sites does not have the folio. But it was only being called there to
> avoid bothering to call __try_to_reclaim_swap() in some cases.
> __try_to_reclaim_swap() gets the folio and (via some other functions)
> calls swap_page_trans_huge_swapped(). So I've removed the problematic
> call site and believe the new logic should be equivalent.
> 
> Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster()
> which used to be called during folio splitting, since
> split_swap_cluster()'s only job was to remove the flag.
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>   include/linux/swap.h | 10 ----------
>   mm/huge_memory.c     |  3 ---
>   mm/swapfile.c        | 47 ++++++++------------------------------------
>   3 files changed, 8 insertions(+), 52 deletions(-)
> 
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 19f30a29e1f1..a073366a227c 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -259,7 +259,6 @@ struct swap_cluster_info {
>   };
>   #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
>   #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
> -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
> 
>   /*
>    * We assign a cluster to each CPU, so each CPU can allocate swap entry from
> @@ -595,15 +594,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis,
>   }
>   #endif /* CONFIG_SWAP */
> 
> -#ifdef CONFIG_THP_SWAP
> -extern int split_swap_cluster(swp_entry_t entry);
> -#else
> -static inline int split_swap_cluster(swp_entry_t entry)
> -{
> -	return 0;
> -}
> -#endif
> -
>   #ifdef CONFIG_MEMCG
>   static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
>   {
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index c9cbcbf6697e..46b3fb943207 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2597,9 +2597,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>   		shmem_uncharge(head->mapping->host, nr_dropped);
>   	remap_page(folio, nr);
> 
> -	if (folio_test_swapcache(folio))
> -		split_swap_cluster(folio->swap);
> -
>   	for (i = 0; i < nr; i++) {
>   		struct page *subpage = head + i;
>   		if (subpage == page)
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index e52f486834eb..c668838fa660 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -342,18 +342,6 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
>   	info->data = 0;
>   }
> 
> -static inline bool cluster_is_huge(struct swap_cluster_info *info)
> -{
> -	if (IS_ENABLED(CONFIG_THP_SWAP))
> -		return info->flags & CLUSTER_FLAG_HUGE;
> -	return false;
> -}
> -
> -static inline void cluster_clear_huge(struct swap_cluster_info *info)
> -{
> -	info->flags &= ~CLUSTER_FLAG_HUGE;
> -}
> -
>   static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
>   						     unsigned long offset)
>   {
> @@ -1021,7 +1009,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
>   	offset = idx * SWAPFILE_CLUSTER;
>   	ci = lock_cluster(si, offset);
>   	alloc_cluster(si, idx);
> -	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
> +	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, 0);

Maybe just use cluster_set_count() and kill cluster_set_count_flag().
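
Something like the following, assuming cluster_set_count() keeps its
current definition in mm/swapfile.c:

            ci = lock_cluster(si, offset);
            alloc_cluster(si, idx);
            cluster_set_count(ci, SWAPFILE_CLUSTER);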
Ryan Roberts Oct. 11, 2023, 10:15 a.m. UTC | #3
On 11/10/2023 09:17, Kefeng Wang wrote:
> 
> 
> On 2023/10/10 22:21, Ryan Roberts wrote:
>> As preparation for supporting small-sized THP in the swap-out path,
>> without first needing to split to order-0, remove CLUSTER_FLAG_HUGE.
>> When present, the flag always implies a PMD-sized THP, which is the
>> same size as a cluster.
>>
>> The only use of the flag was to determine whether a swap entry refers to
>> a single page or a PMD-sized THP in swap_page_trans_huge_swapped().
>> Instead of relying on the flag, we now pass in nr_pages, which
>> originates from the folio's number of pages. This allows the logic to
>> work for folios of any order.
>>
>> The one snag is that one of the swap_page_trans_huge_swapped() call
>> sites does not have the folio. But it was only being called there to
>> avoid bothering to call __try_to_reclaim_swap() in some cases.
>> __try_to_reclaim_swap() gets the folio and (via some other functions)
>> calls swap_page_trans_huge_swapped(). So I've removed the problematic
>> call site and believe the new logic should be equivalent.
>>
>> Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster()
>> which used to be called during folio splitting, since
>> split_swap_cluster()'s only job was to remove the flag.
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>   include/linux/swap.h | 10 ----------
>>   mm/huge_memory.c     |  3 ---
>>   mm/swapfile.c        | 47 ++++++++------------------------------------
>>   3 files changed, 8 insertions(+), 52 deletions(-)
>>
>> diff --git a/include/linux/swap.h b/include/linux/swap.h
>> index 19f30a29e1f1..a073366a227c 100644
>> --- a/include/linux/swap.h
>> +++ b/include/linux/swap.h
>> @@ -259,7 +259,6 @@ struct swap_cluster_info {
>>   };
>>   #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
>>   #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
>> -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge
>> page */
>>
>>   /*
>>    * We assign a cluster to each CPU, so each CPU can allocate swap entry from
>> @@ -595,15 +594,6 @@ static inline int add_swap_extent(struct swap_info_struct
>> *sis,
>>   }
>>   #endif /* CONFIG_SWAP */
>>
>> -#ifdef CONFIG_THP_SWAP
>> -extern int split_swap_cluster(swp_entry_t entry);
>> -#else
>> -static inline int split_swap_cluster(swp_entry_t entry)
>> -{
>> -    return 0;
>> -}
>> -#endif
>> -
>>   #ifdef CONFIG_MEMCG
>>   static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
>>   {
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index c9cbcbf6697e..46b3fb943207 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -2597,9 +2597,6 @@ static void __split_huge_page(struct page *page, struct
>> list_head *list,
>>           shmem_uncharge(head->mapping->host, nr_dropped);
>>       remap_page(folio, nr);
>>
>> -    if (folio_test_swapcache(folio))
>> -        split_swap_cluster(folio->swap);
>> -
>>       for (i = 0; i < nr; i++) {
>>           struct page *subpage = head + i;
>>           if (subpage == page)
>> diff --git a/mm/swapfile.c b/mm/swapfile.c
>> index e52f486834eb..c668838fa660 100644
>> --- a/mm/swapfile.c
>> +++ b/mm/swapfile.c
>> @@ -342,18 +342,6 @@ static inline void cluster_set_null(struct
>> swap_cluster_info *info)
>>       info->data = 0;
>>   }
>>
>> -static inline bool cluster_is_huge(struct swap_cluster_info *info)
>> -{
>> -    if (IS_ENABLED(CONFIG_THP_SWAP))
>> -        return info->flags & CLUSTER_FLAG_HUGE;
>> -    return false;
>> -}
>> -
>> -static inline void cluster_clear_huge(struct swap_cluster_info *info)
>> -{
>> -    info->flags &= ~CLUSTER_FLAG_HUGE;
>> -}
>> -
>>   static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct
>> *si,
>>                                unsigned long offset)
>>   {
>> @@ -1021,7 +1009,7 @@ static int swap_alloc_cluster(struct swap_info_struct
>> *si, swp_entry_t *slot)
>>       offset = idx * SWAPFILE_CLUSTER;
>>       ci = lock_cluster(si, offset);
>>       alloc_cluster(si, idx);
>> -    cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
>> +    cluster_set_count_flag(ci, SWAPFILE_CLUSTER, 0);
> 
> Maybe just use cluster_set_count() and kill cluster_set_count_flag().

Yep good point. I'll do this in the next version - thanks!

Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 19f30a29e1f1..a073366a227c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -259,7 +259,6 @@  struct swap_cluster_info {
 };
 #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
 #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
-#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

 /*
  * We assign a cluster to each CPU, so each CPU can allocate swap entry from
@@ -595,15 +594,6 @@  static inline int add_swap_extent(struct swap_info_struct *sis,
 }
 #endif /* CONFIG_SWAP */

-#ifdef CONFIG_THP_SWAP
-extern int split_swap_cluster(swp_entry_t entry);
-#else
-static inline int split_swap_cluster(swp_entry_t entry)
-{
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c9cbcbf6697e..46b3fb943207 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2597,9 +2597,6 @@  static void __split_huge_page(struct page *page, struct list_head *list,
 		shmem_uncharge(head->mapping->host, nr_dropped);
 	remap_page(folio, nr);

-	if (folio_test_swapcache(folio))
-		split_swap_cluster(folio->swap);
-
 	for (i = 0; i < nr; i++) {
 		struct page *subpage = head + i;
 		if (subpage == page)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e52f486834eb..c668838fa660 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -342,18 +342,6 @@  static inline void cluster_set_null(struct swap_cluster_info *info)
 	info->data = 0;
 }

-static inline bool cluster_is_huge(struct swap_cluster_info *info)
-{
-	if (IS_ENABLED(CONFIG_THP_SWAP))
-		return info->flags & CLUSTER_FLAG_HUGE;
-	return false;
-}
-
-static inline void cluster_clear_huge(struct swap_cluster_info *info)
-{
-	info->flags &= ~CLUSTER_FLAG_HUGE;
-}
-
 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 						     unsigned long offset)
 {
@@ -1021,7 +1009,7 @@  static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 	offset = idx * SWAPFILE_CLUSTER;
 	ci = lock_cluster(si, offset);
 	alloc_cluster(si, idx);
-	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
+	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, 0);

 	memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
 	unlock_cluster(ci);
@@ -1354,7 +1342,6 @@  void put_swap_folio(struct folio *folio, swp_entry_t entry)

 	ci = lock_cluster_or_swap_info(si, offset);
 	if (size == SWAPFILE_CLUSTER) {
-		VM_BUG_ON(!cluster_is_huge(ci));
 		map = si->swap_map + offset;
 		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
 			val = map[i];
@@ -1362,7 +1349,6 @@  void put_swap_folio(struct folio *folio, swp_entry_t entry)
 			if (val == SWAP_HAS_CACHE)
 				free_entries++;
 		}
-		cluster_clear_huge(ci);
 		if (free_entries == SWAPFILE_CLUSTER) {
 			unlock_cluster_or_swap_info(si, ci);
 			spin_lock(&si->lock);
@@ -1384,23 +1370,6 @@  void put_swap_folio(struct folio *folio, swp_entry_t entry)
 	unlock_cluster_or_swap_info(si, ci);
 }

-#ifdef CONFIG_THP_SWAP
-int split_swap_cluster(swp_entry_t entry)
-{
-	struct swap_info_struct *si;
-	struct swap_cluster_info *ci;
-	unsigned long offset = swp_offset(entry);
-
-	si = _swap_info_get(entry);
-	if (!si)
-		return -EBUSY;
-	ci = lock_cluster(si, offset);
-	cluster_clear_huge(ci);
-	unlock_cluster(ci);
-	return 0;
-}
-#endif
-
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
 	const swp_entry_t *e1 = ent1, *e2 = ent2;
@@ -1508,22 +1477,23 @@  int swp_swapcount(swp_entry_t entry)
 }

 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
-					 swp_entry_t entry)
+					 swp_entry_t entry,
+					 unsigned int nr_pages)
 {
 	struct swap_cluster_info *ci;
 	unsigned char *map = si->swap_map;
 	unsigned long roffset = swp_offset(entry);
-	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
+	unsigned long offset = round_down(roffset, nr_pages);
 	int i;
 	bool ret = false;

 	ci = lock_cluster_or_swap_info(si, offset);
-	if (!ci || !cluster_is_huge(ci)) {
+	if (!ci || nr_pages == 1) {
 		if (swap_count(map[roffset]))
 			ret = true;
 		goto unlock_out;
 	}
-	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		if (swap_count(map[offset + i])) {
 			ret = true;
 			break;
@@ -1545,7 +1515,7 @@  static bool folio_swapped(struct folio *folio)
 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
 		return swap_swapcount(si, entry) != 0;

-	return swap_page_trans_huge_swapped(si, entry);
+	return swap_page_trans_huge_swapped(si, entry, folio_nr_pages(folio));
 }

 /**
@@ -1606,8 +1576,7 @@  int free_swap_and_cache(swp_entry_t entry)
 	p = _swap_info_get(entry);
 	if (p) {
 		count = __swap_entry_free(p, entry);
-		if (count == SWAP_HAS_CACHE &&
-		    !swap_page_trans_huge_swapped(p, entry))
+		if (count == SWAP_HAS_CACHE)
 			__try_to_reclaim_swap(p, swp_offset(entry),
 					      TTRS_UNMAPPED | TTRS_FULL);
 	}