
[mm-hotfixes] mm/zswap: refactor zswap_store_page()

Message ID 20250131082037.2426-1-42.hyeyoo@gmail.com (mailing list archive)
State New
Series [mm-hotfixes] mm/zswap: refactor zswap_store_page()

Commit Message

Harry (Hyeonggon) Yoo Jan. 31, 2025, 8:20 a.m. UTC
Change zswap_store_page() to return a boolean value, where true
indicates that the page was successfully stored in zswap, and false
otherwise.

Since zswap_store_page() no longer returns the size of the entry,
remove the 'bytes' variable from zswap_store() and jump to the put_pool
label when the store fails.
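
With that change, the per-page loop in zswap_store() reduces to the
following (a condensed excerpt of the hunk further down, shown here
only for readability):

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);

		/* On any per-page failure, unwind via the put_pool label. */
		if (!zswap_store_page(page, objcg, pool))
			goto put_pool;
	}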

Move the objcg charging and the incrementing of zswap_stored_pages
to just after the tree store succeeds. At that point there is no
further chance of failure, and the freeing path is guaranteed to
uncharge the zswap memory and decrement the counter.
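
Concretely, the success path of zswap_store_page() after the xarray
store then reads roughly as follows (condensed from the diff below; the
function afterwards finishes initializing the entry and returns true):

	/*
	 * The entry is in the tree and no further failure is possible,
	 * so the refs, the charge and the counter are all taken here.
	 */
	zswap_pool_get(pool);
	if (objcg) {
		obj_cgroup_get(objcg);
		obj_cgroup_charge_zswap(objcg, entry->length);
	}
	atomic_long_inc(&zswap_stored_pages);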

Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zswap.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

Comments

Harry (Hyeonggon) Yoo Jan. 31, 2025, 8:24 a.m. UTC | #1
On Fri, Jan 31, 2025 at 5:21 PM Hyeonggon Yoo <42.hyeyoo@gmail.com> wrote:
>
> Change zswap_store_page() to return a boolean value, where true
> indicates that the page was successfully stored in zswap, and false
> otherwise.
>
> Since zswap_store_page() no longer returns the size of the entry,
> remove the 'bytes' variable from zswap_store() and jump to the put_pool
> label when the store fails.
>
> Move the objcg charging and the incrementing of zswap_stored_pages
> to just after the tree store succeeds. At that point there is no
> further chance of failure, and the freeing path is guaranteed to
> uncharge the zswap memory and decrement the counter.
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---

Uh, I was trying to send it in-reply-to this:
https://lore.kernel.org/linux-mm/20250129100844.2935-1-42.hyeyoo@gmail.com

Andrew, could you please fold it into the patch?
There is no major functional difference, just adjustments per Yosry's comments.


Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index f0bd962bffd5..ac9d299e7d0c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1445,9 +1445,9 @@  static void shrink_worker(struct work_struct *w)
 * main API
 **********************************/
 
-static ssize_t zswap_store_page(struct page *page,
-				struct obj_cgroup *objcg,
-				struct zswap_pool *pool)
+static bool zswap_store_page(struct page *page,
+			     struct obj_cgroup *objcg,
+			     struct zswap_pool *pool)
 {
 	swp_entry_t page_swpentry = page_swap_entry(page);
 	struct zswap_entry *entry, *old;
@@ -1456,7 +1456,7 @@  static ssize_t zswap_store_page(struct page *page,
 	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
 	if (!entry) {
 		zswap_reject_kmemcache_fail++;
-		return -EINVAL;
+		return false;
 	}
 
 	if (!zswap_compress(page, entry, pool))
@@ -1483,13 +1483,17 @@  static ssize_t zswap_store_page(struct page *page,
 
 	/*
 	 * The entry is successfully compressed and stored in the tree, there is
-	 * no further possibility of failure. Grab refs to the pool and objcg.
-	 * These refs will be dropped by zswap_entry_free() when the entry is
-	 * removed from the tree.
+	 * no further possibility of failure. Grab refs to the pool and objcg,
+	 * charge zswap memory, and increment zswap_stored_pages.
+	 * The opposite actions will be performed by zswap_entry_free()
+	 * when the entry is removed from the tree.
 	 */
 	zswap_pool_get(pool);
-	if (objcg)
+	if (objcg) {
 		obj_cgroup_get(objcg);
+		obj_cgroup_charge_zswap(objcg, entry->length);
+	}
+	atomic_long_inc(&zswap_stored_pages);
 
 	/*
 	 * We finish initializing the entry while it's already in xarray.
@@ -1504,22 +1508,19 @@  static ssize_t zswap_store_page(struct page *page,
 	entry->pool = pool;
 	entry->swpentry = page_swpentry;
 	entry->objcg = objcg;
-	if (objcg)
-		obj_cgroup_charge_zswap(objcg, entry->length);
 	entry->referenced = true;
 	if (entry->length) {
 		INIT_LIST_HEAD(&entry->lru);
 		zswap_lru_add(&zswap_list_lru, entry);
 	}
-	atomic_long_inc(&zswap_stored_pages);
 
-	return entry->length;
+	return true;
 
 store_failed:
 	zpool_free(pool->zpool, entry->handle);
 compress_failed:
 	zswap_entry_cache_free(entry);
-	return -EINVAL;
+	return false;
 }
 
 bool zswap_store(struct folio *folio)
@@ -1566,10 +1567,8 @@  bool zswap_store(struct folio *folio)
 
 	for (index = 0; index < nr_pages; ++index) {
 		struct page *page = folio_page(folio, index);
-		ssize_t bytes;
 
-		bytes = zswap_store_page(page, objcg, pool);
-		if (bytes < 0)
+		if (!zswap_store_page(page, objcg, pool))
 			goto put_pool;
 	}