
[4/5] mm/zswap: cleanup zswap_load()

Message ID 20231213-zswap-dstmem-v1-4-896763369d04@bytedance.com (mailing list archive)
State New
Series mm/zswap: dstmem reuse optimizations and cleanups

Commit Message

Chengming Zhou Dec. 13, 2023, 4:18 a.m. UTC
After the common decompress part moves into __zswap_load(), zswap_load()
can be cleaned up a little.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zswap.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

Comments

Yosry Ahmed Dec. 14, 2023, 12:56 a.m. UTC | #1
On Tue, Dec 12, 2023 at 8:18 PM Chengming Zhou
<zhouchengming@bytedance.com> wrote:
>
> After the common decompress part moves into __zswap_load(), zswap_load()
> can be cleaned up a little.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>

LGTM, I think it can be squashed into the patch creating
__zswap_load(), but it doesn't matter much.

Reviewed-by: Yosry Ahmed <yosryahmed@google.com>

> [...]

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index 2f095c919a5c..0476e1c553c2 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1765,7 +1765,6 @@ bool zswap_load(struct folio *folio)
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
 	u8 *dst;
-	bool ret;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
@@ -1782,19 +1781,16 @@ bool zswap_load(struct folio *folio)
 		dst = kmap_local_page(page);
 		zswap_fill_page(dst, entry->value);
 		kunmap_local(dst);
-		ret = true;
-		goto stats;
+	} else {
+		__zswap_load(entry, page);
 	}
 
-	__zswap_load(entry, page);
-	ret = true;
-stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
 	spin_lock(&tree->lock);
-	if (ret && zswap_exclusive_loads_enabled) {
+	if (zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		folio_mark_dirty(folio);
 	} else if (entry->length) {
@@ -1804,7 +1800,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	return ret;
+	return true;
 }
 
 void zswap_invalidate(int type, pgoff_t offset)
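
For quick reference, here is how zswap_load() reads once this patch is
applied. This is a sketch stitched together from the hunks and context
lines above, not a verbatim copy of mm/zswap.c; the entry lookup and the
LRU bookkeeping, which fall outside the hunks, are elided:

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	int type = swp_type(swp);
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/* lookup of the entry at offset under tree->lock elided; a miss returns false */

	if (!entry->length) {
		/* same-filled page: reconstruct it directly, no decompression needed */
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	} else {
		/* common decompress path, factored into __zswap_load() earlier in the series */
		__zswap_load(entry, page);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	spin_lock(&tree->lock);
	if (zswap_exclusive_loads_enabled) {
		/* exclusive load: drop the zswap copy and keep the folio dirty */
		zswap_invalidate_entry(tree, entry);
		folio_mark_dirty(folio);
	} else if (entry->length) {
		/* LRU bookkeeping for the retained entry elided */
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	/* neither load path can fail once the entry is found */
	return true;
}

With decompression shared via __zswap_load(), neither branch can fail
once the entry is found, so the local 'ret' and the 'stats' label carry
no information; that is what makes this cleanup possible.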