
[v2,5/5] mm: zswap: remove same_filled module params

Message ID 20240405053510.1948982-6-yosryahmed@google.com (mailing list archive)
State New
Series zswap same-filled and limit checking cleanups

Commit Message

Yosry Ahmed April 5, 2024, 5:35 a.m. UTC
These knobs offer more fine-grained control to userspace than needed and
directly expose/influence kernel implementation; remove them.

For disabling same_filled handling, there is no logical reason to refuse
storing same-filled pages more efficiently and opt for compression.
Scanning pages for patterns may be an argument, but the page contents
will be read into the CPU cache anyway during compression. Also,
removing the same_filled handling code does not move the needle
significantly in terms of performance anyway [1].

For disabling non_same_filled handling, it was added when the compressed
pages in zswap were not being properly charged to memcgs, as workloads
could escape the accounting with compression [2]. This is no longer the
case after commit f4840ccfca25 ("zswap: memcg accounting"), and using
zswap without compression does not make much sense.

[1] https://lore.kernel.org/lkml/CAJD7tkaySFP2hBQw4pnZHJJwe3bMdjJ1t9VC2VJd=khn1_TXvA@mail.gmail.com/
[2] https://lore.kernel.org/lkml/19d5cdee-2868-41bd-83d5-6da75d72e940@maciej.szmigiero.name/

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/zswap.c | 19 -------------------
 1 file changed, 19 deletions(-)
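
For context, below is a minimal userspace sketch of the kind of check that the
removed same_filled_pages_enabled knob used to gate: a page counts as
same-filled if every machine word in it equals the first one, so only that
single value needs to be stored instead of a compressed copy. This is an
illustration only, assuming a 4 KiB page size; it is not the kernel code, and
the actual zswap_is_folio_same_filled() maps a folio and differs in detail.

    /*
     * Simplified, self-contained illustration of a same-filled page check.
     * Not the kernel implementation.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096 /* assumption: 4 KiB pages */

    /*
     * Return true if the page consists of one repeating word and store
     * that word in *value; otherwise return false.
     */
    static bool page_same_filled(const unsigned long *page, unsigned long *value)
    {
    	size_t nr_words = PAGE_SIZE / sizeof(*page);
    	size_t i;

    	for (i = 1; i < nr_words; i++) {
    		if (page[i] != page[0])
    			return false;
    	}
    	*value = page[0];
    	return true;
    }

    int main(void)
    {
    	unsigned long page[PAGE_SIZE / sizeof(unsigned long)];
    	unsigned long val;

    	memset(page, 0, sizeof(page));
    	printf("zeroed page same-filled: %d\n", page_same_filled(page, &val));

    	page[17] = 0xdeadbeef;	/* dirty one word; page is no longer uniform */
    	printf("dirtied page same-filled: %d\n", page_same_filled(page, &val));
    	return 0;
    }

Since the scan touches every word of the page, the contents end up in the CPU
cache either way, which is why the commit message argues the cost of always
performing this check before compression is negligible.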

Comments

Nhat Pham April 5, 2024, 7:58 p.m. UTC | #1
On Thu, Apr 4, 2024 at 10:35 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> These knobs offer more fine-grained control to userspace than needed and
> directly expose/influence kernel implementation; remove them.
>
> For disabling same_filled handling, there is no logical reason to refuse
> storing same-filled pages more efficiently and opt for compression.
> Scanning pages for patterns may be an argument, but the page contents
> will be read into the CPU cache anyway during compression. Also,
> removing the same_filled handling code does not move the needle
> significantly in terms of performance anyway [1].
>
> For disabling non_same_filled handling, it was added when the compressed
> pages in zswap were not being properly charged to memcgs, as workloads
> could escape the accounting with compression [2]. This is no longer the
> case after commit f4840ccfca25 ("zswap: memcg accounting"), and using
> zswap without compression does not make much sense.
>
> [1]https://lore.kernel.org/lkml/CAJD7tkaySFP2hBQw4pnZHJJwe3bMdjJ1t9VC2VJd=khn1_TXvA@mail.gmail.com/
> [2]https://lore.kernel.org/lkml/19d5cdee-2868-41bd-83d5-6da75d72e940@maciej.szmigiero.name/
>
> Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
> Acked-by: Johannes Weiner <hannes@cmpxchg.org>

I see less code to maintain, I like :)
The code LGTM, and we already discussed the justifications in the
original series, so:

Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Chengming Zhou April 10, 2024, 2:31 a.m. UTC | #2
On 2024/4/5 13:35, Yosry Ahmed wrote:
> These knobs offer more fine-grained control to userspace than needed and
> directly expose/influence kernel implementation; remove them.
> 
> For disabling same_filled handling, there is no logical reason to refuse
> storing same-filled pages more efficiently and opt for compression.
> Scanning pages for patterns may be an argument, but the page contents
> will be read into the CPU cache anyway during compression. Also,
> removing the same_filled handling code does not move the needle
> significantly in terms of performance anyway [1].
> 
> For disabling non_same_filled handling, it was added when the compressed
> pages in zswap were not being properly charged to memcgs, as workloads
> could escape the accounting with compression [2]. This is no longer the
> case after commit f4840ccfca25 ("zswap: memcg accounting"), and using
> zswap without compression does not make much sense.
> 
> [1]https://lore.kernel.org/lkml/CAJD7tkaySFP2hBQw4pnZHJJwe3bMdjJ1t9VC2VJd=khn1_TXvA@mail.gmail.com/
> [2]https://lore.kernel.org/lkml/19d5cdee-2868-41bd-83d5-6da75d72e940@maciej.szmigiero.name/
> 
> Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
> Acked-by: Johannes Weiner <hannes@cmpxchg.org>

Nice cleanup!

Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>

Thanks.

> ---
>  mm/zswap.c | 19 -------------------
>  1 file changed, 19 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 13869d18c13bd..b738435215218 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -140,19 +140,6 @@ static const struct kernel_param_ops zswap_accept_thr_param_ops = {
>  module_param_cb(accept_threshold_percent, &zswap_accept_thr_param_ops,
>  		&zswap_accept_thr_percent, 0644);
>  
> -/*
> - * Enable/disable handling same-value filled pages (enabled by default).
> - * If disabled every page is considered non-same-value filled.
> - */
> -static bool zswap_same_filled_pages_enabled = true;
> -module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
> -		   bool, 0644);
> -
> -/* Enable/disable handling non-same-value filled pages (enabled by default) */
> -static bool zswap_non_same_filled_pages_enabled = true;
> -module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
> -		   bool, 0644);
> -
>  /* Number of zpools in zswap_pool (empirically determined for scalability) */
>  #define ZSWAP_NR_ZPOOLS 32
>  
> @@ -1421,9 +1408,6 @@ static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value
>  	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
>  	bool ret = false;
>  
> -	if (!zswap_same_filled_pages_enabled)
> -		return false;
> -
>  	page = kmap_local_folio(folio, 0);
>  	val = page[0];
>  
> @@ -1512,9 +1496,6 @@ bool zswap_store(struct folio *folio)
>  		goto store_entry;
>  	}
>  
> -	if (!zswap_non_same_filled_pages_enabled)
> -		goto freepage;
> -
>  	/* if entry is successfully added, it keeps the reference */
>  	entry->pool = zswap_pool_current_get();
>  	if (!entry->pool)

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index 13869d18c13bd..b738435215218 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -140,19 +140,6 @@  static const struct kernel_param_ops zswap_accept_thr_param_ops = {
 module_param_cb(accept_threshold_percent, &zswap_accept_thr_param_ops,
 		&zswap_accept_thr_percent, 0644);
 
-/*
- * Enable/disable handling same-value filled pages (enabled by default).
- * If disabled every page is considered non-same-value filled.
- */
-static bool zswap_same_filled_pages_enabled = true;
-module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
-		   bool, 0644);
-
-/* Enable/disable handling non-same-value filled pages (enabled by default) */
-static bool zswap_non_same_filled_pages_enabled = true;
-module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
-		   bool, 0644);
-
 /* Number of zpools in zswap_pool (empirically determined for scalability) */
 #define ZSWAP_NR_ZPOOLS 32
 
@@ -1421,9 +1408,6 @@  static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value
 	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
 	bool ret = false;
 
-	if (!zswap_same_filled_pages_enabled)
-		return false;
-
 	page = kmap_local_folio(folio, 0);
 	val = page[0];
 
@@ -1512,9 +1496,6 @@  bool zswap_store(struct folio *folio)
 		goto store_entry;
 	}
 
-	if (!zswap_non_same_filled_pages_enabled)
-		goto freepage;
-
 	/* if entry is successfully added, it keeps the reference */
 	entry->pool = zswap_pool_current_get();
 	if (!entry->pool)