
[3/9] mm: swap: use __swap_entries_free() to free swap entry in swap_entry_put_locked()

Message ID 20250313210515.9920-4-shikemeng@huaweicloud.com
State New
Series Minor cleanups and improvements to swap freeing code

Commit Message

Kemeng Shi March 13, 2025, 9:05 p.m. UTC
In swap_entry_put_locked(), we set the slot to SWAP_HAS_CACHE and rely
on the caller to call swap_entry_range_free() to do the actual freeing.
This introduces an unnecessary intermediate state. By calling
__swap_entries_free() directly from swap_entry_put_locked(), we can
eliminate the need to set the slot to SWAP_HAS_CACHE. This also makes
the behavior of swap_entry_put_locked() more consistent with the other
put() operations, which do the actual freeing after the last reference
is put.
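
For clarity, the tail of swap_entry_put_locked() after this change reads
roughly as follows (illustrative sketch only, surrounding context
trimmed from the hunk below):

	usage = count | has_cache;
	WRITE_ONCE(si->swap_map[offset], usage);
	if (!usage)
		__swap_entries_free(si, ci, entry, 1);

	return usage;

With the freeing folded in, callers such as swap_entry_put(),
cluster_swap_free_nr() and put_swap_folio() no longer need to call
swap_entry_range_free() themselves when the last reference is dropped.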

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/swapfile.c | 28 ++++++++++++----------------
 1 file changed, 12 insertions(+), 16 deletions(-)

Patch

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7c886f9dd6f9..ba37b9bff586 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1367,9 +1367,11 @@  static inline void __swap_entries_free(struct swap_info_struct *si,
 }
 
 static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
-					   unsigned long offset,
+					   struct swap_cluster_info *ci,
+					   swp_entry_t entry,
 					   unsigned char usage)
 {
+	unsigned long offset = swp_offset(entry);
 	unsigned char count;
 	unsigned char has_cache;
 
@@ -1398,10 +1400,9 @@  static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
 	}
 
 	usage = count | has_cache;
-	if (usage)
-		WRITE_ONCE(si->swap_map[offset], usage);
-	else
-		WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+	WRITE_ONCE(si->swap_map[offset], usage);
+	if (!usage)
+		__swap_entries_free(si, ci, entry, 1);
 
 	return usage;
 }
@@ -1480,9 +1481,7 @@  static unsigned char swap_entry_put(struct swap_info_struct *si,
 	unsigned char usage;
 
 	ci = lock_cluster(si, offset);
-	usage = swap_entry_put_locked(si, offset, 1);
-	if (!usage)
-		swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+	usage = swap_entry_put_locked(si, ci, entry, 1);
 	unlock_cluster(ci);
 
 	return usage;
@@ -1562,8 +1561,8 @@  static void cluster_swap_free_nr(struct swap_info_struct *si,
 
 	ci = lock_cluster(si, offset);
 	do {
-		if (!swap_entry_put_locked(si, offset, usage))
-			swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+		swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
+				      usage);
 	} while (++offset < end);
 	unlock_cluster(ci);
 }
@@ -1607,12 +1606,9 @@  void put_swap_folio(struct folio *folio, swp_entry_t entry)
 	ci = lock_cluster(si, offset);
 	if (swap_only_has_cache(si, offset, size))
 		swap_entry_range_free(si, ci, entry, size);
-	else {
-		for (int i = 0; i < size; i++, entry.val++) {
-			if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
-				swap_entry_range_free(si, ci, entry, 1);
-		}
-	}
+	else
+		for (int i = 0; i < size; i++, entry.val++)
+			swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
 	unlock_cluster(ci);
 }