Message ID | 20240325235018.2028408-5-yosryahmed@google.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | zswap: store zero-filled pages more efficiently | expand |
On Mon, Mar 25, 2024 at 4:50 PM Yosry Ahmed <yosryahmed@google.com> wrote: > > Currently, zswap_store() check zswap_same_filled_pages_enabled, kmaps > the folio, then calls zswap_is_page_same_filled() to check the folio > contents. Move this logic into zswap_is_page_same_filled() as well (and > rename it to use 'folio' while we are at it). > > This makes zswap_store() cleaner, and makes following changes to that > logic contained within the helper. > > Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Reviewed-by: Nhat Pham <nphamcs@gmail.com>
On 2024/3/26 07:50, Yosry Ahmed wrote: > Currently, zswap_store() check zswap_same_filled_pages_enabled, kmaps > the folio, then calls zswap_is_page_same_filled() to check the folio > contents. Move this logic into zswap_is_page_same_filled() as well (and > rename it to use 'folio' while we are at it). > > This makes zswap_store() cleaner, and makes following changes to that > logic contained within the helper. > > Signed-off-by: Yosry Ahmed <yosryahmed@google.com> LGTM with one comment below: Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev> > --- > mm/zswap.c | 45 ++++++++++++++++++++++++--------------------- > 1 file changed, 24 insertions(+), 21 deletions(-) > > diff --git a/mm/zswap.c b/mm/zswap.c > index 6b890c8590ef7..498a6c5839bef 100644 > --- a/mm/zswap.c > +++ b/mm/zswap.c > @@ -1385,26 +1385,36 @@ static void shrink_worker(struct work_struct *w) > } while (zswap_total_pages() > thr); > } > > -static int zswap_is_page_same_filled(void *ptr, unsigned long *value) > +static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value) > { > unsigned long *page; > unsigned long val; > unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; > + bool ret; > > - page = (unsigned long *)ptr; > + if (!zswap_same_filled_pages_enabled) > + return false; > + > + page = kmap_local_folio(folio, 0); > val = page[0]; > > - if (val != page[last_pos]) > - return 0; > + if (val != page[last_pos]) { > + ret = false; > + goto out; > + } > > for (pos = 1; pos < last_pos; pos++) { > - if (val != page[pos]) > - return 0; > + if (val != page[pos]) { > + ret = false; nit: ret can be initialized to false, so > + goto out; > + } > } > > *value = val; > - > - return 1; > + ret = true; only need to set to true here. Thanks. 
> +out: > + kunmap_local(page); > + return ret; > } > > static void zswap_fill_page(void *ptr, unsigned long value) > @@ -1438,6 +1448,7 @@ bool zswap_store(struct folio *folio) > struct obj_cgroup *objcg = NULL; > struct mem_cgroup *memcg = NULL; > struct zswap_entry *entry; > + unsigned long value; > > VM_WARN_ON_ONCE(!folio_test_locked(folio)); > VM_WARN_ON_ONCE(!folio_test_swapcache(folio)); > @@ -1470,19 +1481,11 @@ bool zswap_store(struct folio *folio) > goto reject; > } > > - if (zswap_same_filled_pages_enabled) { > - unsigned long value; > - u8 *src; > - > - src = kmap_local_folio(folio, 0); > - if (zswap_is_page_same_filled(src, &value)) { > - kunmap_local(src); > - entry->length = 0; > - entry->value = value; > - atomic_inc(&zswap_same_filled_pages); > - goto insert_entry; > - } > - kunmap_local(src); > + if (zswap_is_folio_same_filled(folio, &value)) { > + entry->length = 0; > + entry->value = value; > + atomic_inc(&zswap_same_filled_pages); > + goto insert_entry; > } > > if (!zswap_non_same_filled_pages_enabled)
On Tue, Mar 26, 2024 at 7:39 PM Chengming Zhou <chengming.zhou@linux.dev> wrote: > > On 2024/3/26 07:50, Yosry Ahmed wrote: > > Currently, zswap_store() check zswap_same_filled_pages_enabled, kmaps > > the folio, then calls zswap_is_page_same_filled() to check the folio > > contents. Move this logic into zswap_is_page_same_filled() as well (and > > rename it to use 'folio' while we are at it). > > > > This makes zswap_store() cleaner, and makes following changes to that > > logic contained within the helper. > > > > Signed-off-by: Yosry Ahmed <yosryahmed@google.com> > > LGTM with one comment below: > > Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev> > > > --- > > mm/zswap.c | 45 ++++++++++++++++++++++++--------------------- > > 1 file changed, 24 insertions(+), 21 deletions(-) > > > > diff --git a/mm/zswap.c b/mm/zswap.c > > index 6b890c8590ef7..498a6c5839bef 100644 > > --- a/mm/zswap.c > > +++ b/mm/zswap.c > > @@ -1385,26 +1385,36 @@ static void shrink_worker(struct work_struct *w) > > } while (zswap_total_pages() > thr); > > } > > > > -static int zswap_is_page_same_filled(void *ptr, unsigned long *value) > > +static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value) > > { > > unsigned long *page; > > unsigned long val; > > unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; > > + bool ret; > > > > - page = (unsigned long *)ptr; > > + if (!zswap_same_filled_pages_enabled) > > + return false; > > + > > + page = kmap_local_folio(folio, 0); > > val = page[0]; > > > > - if (val != page[last_pos]) > > - return 0; > > + if (val != page[last_pos]) { > > + ret = false; > > + goto out; > > + } > > > > for (pos = 1; pos < last_pos; pos++) { > > - if (val != page[pos]) > > - return 0; > > + if (val != page[pos]) { > > + ret = false; > > nit: ret can be initialized to false, so > > > + goto out; > > + } > > } > > > > *value = val; > > - > > - return 1; > > + ret = true; > > only need to set to true here. 
I didn't bother improving the code here because patch 6 will replace it anyway, but I will do that in the next version anyway, might as well. Thanks!
diff --git a/mm/zswap.c b/mm/zswap.c index 6b890c8590ef7..498a6c5839bef 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1385,26 +1385,36 @@ static void shrink_worker(struct work_struct *w) } while (zswap_total_pages() > thr); } -static int zswap_is_page_same_filled(void *ptr, unsigned long *value) +static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value) { unsigned long *page; unsigned long val; unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; + bool ret; - page = (unsigned long *)ptr; + if (!zswap_same_filled_pages_enabled) + return false; + + page = kmap_local_folio(folio, 0); val = page[0]; - if (val != page[last_pos]) - return 0; + if (val != page[last_pos]) { + ret = false; + goto out; + } for (pos = 1; pos < last_pos; pos++) { - if (val != page[pos]) - return 0; + if (val != page[pos]) { + ret = false; + goto out; + } } *value = val; - - return 1; + ret = true; +out: + kunmap_local(page); + return ret; } static void zswap_fill_page(void *ptr, unsigned long value) @@ -1438,6 +1448,7 @@ bool zswap_store(struct folio *folio) struct obj_cgroup *objcg = NULL; struct mem_cgroup *memcg = NULL; struct zswap_entry *entry; + unsigned long value; VM_WARN_ON_ONCE(!folio_test_locked(folio)); VM_WARN_ON_ONCE(!folio_test_swapcache(folio)); @@ -1470,19 +1481,11 @@ bool zswap_store(struct folio *folio) goto reject; } - if (zswap_same_filled_pages_enabled) { - unsigned long value; - u8 *src; - - src = kmap_local_folio(folio, 0); - if (zswap_is_page_same_filled(src, &value)) { - kunmap_local(src); - entry->length = 0; - entry->value = value; - atomic_inc(&zswap_same_filled_pages); - goto insert_entry; - } - kunmap_local(src); + if (zswap_is_folio_same_filled(folio, &value)) { + entry->length = 0; + entry->value = value; + atomic_inc(&zswap_same_filled_pages); + goto insert_entry; } if (!zswap_non_same_filled_pages_enabled)
Currently, zswap_store() checks zswap_same_filled_pages_enabled, kmaps the folio, then calls zswap_is_page_same_filled() to check the folio contents. Move this logic into zswap_is_page_same_filled() as well (and rename it to use 'folio' while we are at it). This makes zswap_store() cleaner, and makes following changes to that logic contained within the helper. Signed-off-by: Yosry Ahmed <yosryahmed@google.com> --- mm/zswap.c | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-)