
[2/2] mm: zswap: remove nr_zswap_stored atomic

Message ID 20240320020823.337644-2-yosryahmed@google.com (mailing list archive)
State New
Series [1/2] mm: zswap: increase shrinking protection for zswap swapins only

Commit Message

Yosry Ahmed March 20, 2024, 2:08 a.m. UTC
zswap_nr_stored is used to maintain the number of stored pages in zswap
that are not same-filled pages. It is used in zswap_shrinker_count() to
scale the number of freeable compressed pages by the compression ratio.
That is, to reduce the amount of writeback from zswap with higher
compression ratios as the ROI from IO diminishes.

However, the need for this counter is questionable for two reasons:
- It is redundant. The value can be inferred from (zswap_stored_pages -
  zswap_same_filled_pages).
- When memcgs are enabled, we use memcg_page_state(memcg,
  MEMCG_ZSWAPPED), which includes same-filled pages anyway (i.e.
  equivalent to zswap_stored_pages).

Use zswap_stored_pages instead in zswap_shrinker_count() to keep things
consistent whether memcgs are enabled or not, and add a comment about
the number of freeable pages possibly being scaled down more than it
should if we have lots of same-filled pages (i.e. inflated compression
ratio).

Remove nr_zswap_stored and one atomic operation in the store and free
paths.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
---
 mm/zswap.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
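
For readers who want to see the scaling concretely, below is a small,
self-contained user-space sketch of the arithmetic done at the end of
zswap_shrinker_count(). The counter values are made up for illustration,
and mult_frac() is re-implemented locally with what should be the same
semantics as the kernel macro; only the general shape of the calculation
is taken from the patch above.

/*
 * Hedged sketch only: mirrors the "return mult_frac(nr_freeable,
 * nr_backing, nr_stored)" step, not the surrounding kernel code.
 */
#include <stdio.h>

/* Same idea as the kernel's mult_frac(): x * numer / denom, split so the
 * intermediate product is less likely to overflow. */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        unsigned long quot = x / denom;
        unsigned long rem = x % denom;

        return quot * numer + rem * numer / denom;
}

int main(void)
{
        unsigned long nr_backing = 250;  /* pages of memory zswap actually uses */
        unsigned long nr_stored = 1000;  /* pages stored in zswap               */
        unsigned long nr_freeable = 800; /* entries on the shrinker's LRU       */

        /* The better zswap compresses (the smaller nr_backing/nr_stored is),
         * the fewer pages the shrinker reports as freeable. */
        printf("scaled nr_freeable: %lu\n",
               mult_frac(nr_freeable, nr_backing, nr_stored)); /* prints 200 */
        return 0;
}

With an illustrative 4:1 compression ratio (250 backing pages for 1000
stored pages), only a quarter of the LRU entries are reported as freeable.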

Comments

Yosry Ahmed March 21, 2024, 9:09 p.m. UTC | #1
On Tue, Mar 19, 2024 at 7:08 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> zswap_nr_stored is used to maintain the number of stored pages in zswap
> that are not same-filled pages. It is used in zswap_shrinker_count() to
> scale the number of freeable compressed pages by the compression ratio.
> That is, to reduce the amount of writeback from zswap with higher
> compression ratios as the ROI from IO diminishes.
>
> However, the need for this counter is questionable for two reasons:
> - It is redundant. The value can be inferred from (zswap_stored_pages -
>   zswap_same_filled_pages).
> - When memcgs are enabled, we use memcg_page_state(memcg,
>   MEMCG_ZSWAPPED), which includes same-filled pages anyway (i.e.
>   equivalent to zswap_stored_pages).
>
> Use zswap_stored_pages instead in zswap_shrinker_count() to keep things
> consistent whether memcgs are enabled or not, and add a comment about
> the number of freeable pages possibly being scaled down more than it
> should if we have lots of same-filled pages (i.e. inflated compression
> ratio).
>
> Remove nr_zswap_stored and one atomic operation in the store and free
> paths.
>
> Signed-off-by: Yosry Ahmed <yosryahmed@google.com>

Any thoughts on this patch? Should I resend it separately?

> ---
>  mm/zswap.c | 11 ++++++-----
>  1 file changed, 6 insertions(+), 5 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 323f1dea43d22..ffcfce05a4408 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -181,8 +181,6 @@ struct zswap_pool {
>
>  /* Global LRU lists shared by all zswap pools. */
>  static struct list_lru zswap_list_lru;
> -/* counter of pages stored in all zswap pools. */
> -static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
>
>  /* The lock protects zswap_next_shrink updates. */
>  static DEFINE_SPINLOCK(zswap_shrink_lock);
> @@ -880,7 +878,6 @@ static void zswap_entry_free(struct zswap_entry *entry)
>         else {
>                 zswap_lru_del(&zswap_list_lru, entry);
>                 zpool_free(zswap_find_zpool(entry), entry->handle);
> -               atomic_dec(&zswap_nr_stored);
>                 zswap_pool_put(entry->pool);
>         }
>         if (entry->objcg) {
> @@ -1305,7 +1302,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
>  #else
>         /* use pool stats instead of memcg stats */
>         nr_backing = zswap_total_pages();
> -       nr_stored = atomic_read(&zswap_nr_stored);
> +       nr_stored = atomic_read(&zswap_stored_pages);
>  #endif
>
>         if (!nr_stored)
> @@ -1325,6 +1322,11 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
>          * This ensures that the better zswap compresses memory, the fewer
>          * pages we will evict to swap (as it will otherwise incur IO for
>          * relatively small memory saving).
> +        *
> +        * The memory saving factor calculated here takes same-filled pages into
> +        * account, but those are not freeable since they occupy almost no
> +        * space. Hence, we may scale nr_freeable down a little bit more than we
> +        * should if we have a lot of same-filled pages.
>          */
>         return mult_frac(nr_freeable, nr_backing, nr_stored);
>  }
> @@ -1570,7 +1572,6 @@ bool zswap_store(struct folio *folio)
>         if (entry->length) {
>                 INIT_LIST_HEAD(&entry->lru);
>                 zswap_lru_add(&zswap_list_lru, entry);
> -               atomic_inc(&zswap_nr_stored);
>         }
>         spin_unlock(&tree->lock);
>
> --
> 2.44.0.291.gc1ea87d7ee-goog
>
Nhat Pham March 21, 2024, 11:50 p.m. UTC | #2
On Thu, Mar 21, 2024 at 2:09 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Tue, Mar 19, 2024 at 7:08 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > zswap_nr_stored is used to maintain the number of stored pages in zswap
> > that are not same-filled pages. It is used in zswap_shrinker_count() to
> > scale the number of freeable compressed pages by the compression ratio.
> > That is, to reduce the amount of writeback from zswap with higher
> > compression ratios as the ROI from IO diminishes.
> >
> > However, the need for this counter is questionable for two reasons:
> > - It is redundant. The value can be inferred from (zswap_stored_pages -
> >   zswap_same_filled_pages).

Ah, I forgot about this. For context, nr_stored was originally a
zswap_pool-specific stat, but I think Chengming has pulled it out and
converted it into a global pool stat in an earlier patch - yet,
globally, we already have zswap_stored_pages that is (mostly) the same
counter.

Might as well use existing counters (zswap_stored_pages) then, rather
than a newly introduced counter. Probably will shave off a couple
cycles here and there for the atomic increment/decrement :)

> > - When memcgs are enabled, we use memcg_page_state(memcg,
> >   MEMCG_ZSWAPPED), which includes same-filled pages anyway (i.e.
> >   equivalent to zswap_stored_pages).

This is fine I suppose. I was aware of this weird inaccuracy. However,
for the CONFIG_MEMCG case, it was kinda silly to introduce the counter
for per-cgroup same-filled zswap pages, just for this one purpose, so
I decided to accept the inaccuracy.

> >
> > Use zswap_stored_pages instead in zswap_shrinker_count() to keep things
> > consistent whether memcgs are enabled or not, and add a comment about
> > the number of freeable pages possibly being scaled down more than it
> > should if we have lots of same-filled pages (i.e. inflated compression
> > ratio).
> >
> > Remove nr_zswap_stored and one atomic operation in the store and free
> > paths.
> >
> > Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
>
> Any thoughts on this patch? Should I resend it separately?

Might be worth resending it separately, but up to you and Andrew!

Reviewed-by: Nhat Pham <nphamcs@gmail.com>
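
To put the inaccuracy discussed above into purely illustrative, invented
numbers: the old zswap_nr_stored denominator excluded same-filled pages,
while zswap_stored_pages includes them, so the reported freeable count
shrinks a little further when many same-filled pages are present. The
values below are assumptions, not measurements.

#include <stdio.h>

int main(void)
{
        /* Invented example values, not measurements. */
        unsigned long nr_backing = 250;      /* compressed backing pages          */
        unsigned long nr_compressed = 800;   /* stored pages that use a zpool     */
        unsigned long nr_same_filled = 200;  /* stored pages with no zpool object */
        unsigned long nr_freeable = 800;     /* entries on the shrinker's LRU     */

        /* Old denominator (zswap_nr_stored: same-filled pages excluded). */
        printf("excluding same-filled: %lu\n",
               nr_freeable * nr_backing / nr_compressed);                    /* 250 */

        /* New denominator (zswap_stored_pages: same-filled pages included). */
        printf("including same-filled: %lu\n",
               nr_freeable * nr_backing / (nr_compressed + nr_same_filled)); /* 200 */
        return 0;
}

The gap between the two results is the small over-scaling that the new
comment in the patch warns about; in exchange, the CONFIG_MEMCG and
!CONFIG_MEMCG paths now use equivalent counters.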
Yosry Ahmed March 21, 2024, 11:57 p.m. UTC | #3
On Thu, Mar 21, 2024 at 4:50 PM Nhat Pham <nphamcs@gmail.com> wrote:
>
> On Thu, Mar 21, 2024 at 2:09 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > On Tue, Mar 19, 2024 at 7:08 PM Yosry Ahmed <yosryahmed@google.com> wrote:
> > >
> > > zswap_nr_stored is used to maintain the number of stored pages in zswap
> > > that are not same-filled pages. It is used in zswap_shrinker_count() to
> > > scale the number of freeable compressed pages by the compression ratio.
> > > That is, to reduce the amount of writeback from zswap with higher
> > > compression ratios as the ROI from IO diminishes.
> > >
> > > However, the need for this counter is questionable for two reasons:
> > > - It is redundant. The value can be inferred from (zswap_stored_pages -
> > >   zswap_same_filled_pages).
>
> Ah, I forgot about this. For context, nr_stored was originally a
> zswap_pool-specific stat, but I think Chengming has pulled it out and
> converted it into a global pool stat in an earlier patch - yet,
> globally, we already have zswap_stored_pages that is (mostly) the same
> counter.

Thanks for the context.

>
> Might as well use existing counters (zswap_stored_pages) then, rather
> than a newly introduced counter. Probably will shave off a couple
> cycles here and there for the atomic increment/decrement :)
>
> > > - When memcgs are enabled, we use memcg_page_state(memcg,
> > >   MEMCG_ZSWAPPED), which includes same-filled pages anyway (i.e.
> > >   equivalent to zswap_stored_pages).
>
> This is fine I suppose. I was aware of this weird inaccuracy. However,
> for the CONFIG_MEMCG case, it was kinda silly to introduce the counter
> for per-cgroup same-filled zswap pages, just for this one purpose, so
> I decided to accept the inaccuracy.
>
> > >
> > > Use zswap_stored_pages instead in zswap_shrinker_count() to keep things
> > > consistent whether memcgs are enabled or not, and add a comment about
> > > the number of freeable pages possibly being scaled down more than it
> > > should if we have lots of same-filled pages (i.e. inflated compression
> > > ratio).
> > >
> > > Remove nr_zswap_stored and one atomic operation in the store and free
> > > paths.
> > >
> > > Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
> >
> > Any thoughts on this patch? Should I resend it separately?
>
> Might be worth resending it separately, but up to you and Andrew!

I will resend to add some context and include your R-b, thanks.

>
> Reviewed-by: Nhat Pham <nphamcs@gmail.com>

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index 323f1dea43d22..ffcfce05a4408 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -181,8 +181,6 @@  struct zswap_pool {
 
 /* Global LRU lists shared by all zswap pools. */
 static struct list_lru zswap_list_lru;
-/* counter of pages stored in all zswap pools. */
-static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
 
 /* The lock protects zswap_next_shrink updates. */
 static DEFINE_SPINLOCK(zswap_shrink_lock);
@@ -880,7 +878,6 @@  static void zswap_entry_free(struct zswap_entry *entry)
 	else {
 		zswap_lru_del(&zswap_list_lru, entry);
 		zpool_free(zswap_find_zpool(entry), entry->handle);
-		atomic_dec(&zswap_nr_stored);
 		zswap_pool_put(entry->pool);
 	}
 	if (entry->objcg) {
@@ -1305,7 +1302,7 @@  static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 #else
 	/* use pool stats instead of memcg stats */
 	nr_backing = zswap_total_pages();
-	nr_stored = atomic_read(&zswap_nr_stored);
+	nr_stored = atomic_read(&zswap_stored_pages);
 #endif
 
 	if (!nr_stored)
@@ -1325,6 +1322,11 @@  static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	 * This ensures that the better zswap compresses memory, the fewer
 	 * pages we will evict to swap (as it will otherwise incur IO for
 	 * relatively small memory saving).
+	 *
+	 * The memory saving factor calculated here takes same-filled pages into
+	 * account, but those are not freeable since they occupy almost no
+	 * space. Hence, we may scale nr_freeable down a little bit more than we
+	 * should if we have a lot of same-filled pages.
 	 */
 	return mult_frac(nr_freeable, nr_backing, nr_stored);
 }
@@ -1570,7 +1572,6 @@  bool zswap_store(struct folio *folio)
 	if (entry->length) {
 		INIT_LIST_HEAD(&entry->lru);
 		zswap_lru_add(&zswap_list_lru, entry);
-		atomic_inc(&zswap_nr_stored);
 	}
 	spin_unlock(&tree->lock);