Message ID | 20230803070820.3775663-1-ruanjinjie@huawei.com (mailing list archive) |
---|---|
State | New |
Series | [-next] mm: zswap: use helper function put_z3fold_locked() |
On Thu, Aug 3, 2023 at 9:09 AM Ruan Jinjie <ruanjinjie@huawei.com> wrote:
>
> This code is already duplicated six times, use helper function
> put_z3fold_locked() to release z3fold page instead of open code it
> to help improve code readability a bit. No functional change involved.
>
> Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>

Sure, just fix the subject please. It's not zswap :)

~Vitaly
On 2023/8/3 15:08, Ruan Jinjie wrote:
> This code is already duplicated six times, use helper function
> put_z3fold_locked() to release z3fold page instead of open code it
> to help improve code readability a bit. No functional change involved.
>

There is one "if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))"
left in z3fold_free(). It might be better to add another helper for it to
make the code more consistent? But no preference.

Thanks.
On 2023/8/3 16:11, Vitaly Wool wrote:
> On Thu, Aug 3, 2023 at 9:09 AM Ruan Jinjie <ruanjinjie@huawei.com> wrote:
>>
>> This code is already duplicated six times, use helper function
>> put_z3fold_locked() to release z3fold page instead of open code it
>> to help improve code readability a bit. No functional change involved.
>>
>> Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
>
> Sure, just fix the subject please. It's not zswap :)

Ok, thank you very much!

>
> ~Vitaly
On 2023/8/3 16:38, Miaohe Lin wrote:
> On 2023/8/3 15:08, Ruan Jinjie wrote:
>> This code is already duplicated six times, use helper function
>> put_z3fold_locked() to release z3fold page instead of open code it
>> to help improve code readability a bit. No functional change involved.
>>
>
> There is one "if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))"
> left in z3fold_free(). It might be better to add another helper for it to
> make the code more consistent? But no preference.

Ok, I'll add it soon.

>
> Thanks.
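For reference, a minimal sketch of the follow-up helper suggested above. The name put_z3fold_locked_list() is an assumption for illustration, not code from this patch:

```c
/*
 * Hypothetical companion helper (illustration only, not part of this patch):
 * wraps the remaining open-coded kref_put() in z3fold_free() the same way
 * put_z3fold_locked() wraps release_z3fold_page_locked().
 */
static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
{
	return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
}
```

The lone call site in z3fold_free() would then test put_z3fold_locked_list(zhdr) instead of open-coding the kref_put().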
```diff
diff --git a/mm/z3fold.c b/mm/z3fold.c
index e84de91ecccb..0b483de83d95 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -480,6 +480,11 @@ static void release_z3fold_page_locked_list(struct kref *ref)
 	__release_z3fold_page(zhdr, true);
 }
 
+static inline int put_z3fold_locked(struct z3fold_header *zhdr)
+{
+	return kref_put(&zhdr->refcount, release_z3fold_page_locked);
+}
+
 static void free_pages_work(struct work_struct *w)
 {
 	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
@@ -666,7 +671,7 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 	return new_zhdr;
 
 out_fail:
-	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
+	if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
 		add_to_unbuddied(pool, new_zhdr);
 		z3fold_page_unlock(new_zhdr);
 	}
@@ -741,7 +746,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 		list_del_init(&zhdr->buddy);
 	spin_unlock(&pool->lock);
 
-	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
+	if (put_z3fold_locked(zhdr))
 		return;
 
 	if (test_bit(PAGE_STALE, &page->private) ||
@@ -752,7 +757,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 
 	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
 	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
-		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+		if (!put_z3fold_locked(zhdr)) {
 			clear_bit(PAGE_CLAIMED, &page->private);
 			z3fold_page_unlock(zhdr);
 		}
@@ -878,7 +883,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 	return zhdr;
 
 out_fail:
-	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+	if (!put_z3fold_locked(zhdr)) {
 		add_to_unbuddied(pool, zhdr);
 		z3fold_page_unlock(zhdr);
 	}
@@ -1012,8 +1017,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	if (zhdr) {
 		bud = get_free_buddy(zhdr, chunks);
 		if (bud == HEADLESS) {
-			if (!kref_put(&zhdr->refcount,
-					release_z3fold_page_locked))
+			if (!put_z3fold_locked(zhdr))
 				z3fold_page_unlock(zhdr);
 			pr_err("No free chunks in unbuddied\n");
 			WARN_ON(1);
@@ -1346,7 +1350,7 @@ static void z3fold_page_putback(struct page *page)
 	if (!list_empty(&zhdr->buddy))
 		list_del_init(&zhdr->buddy);
 	INIT_LIST_HEAD(&page->lru);
-	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
+	if (put_z3fold_locked(zhdr))
 		return;
 	if (list_empty(&zhdr->buddy))
 		add_to_unbuddied(pool, zhdr);
```
This code is already duplicated six times; use the helper function
put_z3fold_locked() to release the z3fold page instead of open-coding it,
to help improve code readability a bit. No functional change involved.

Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
---
 mm/z3fold.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
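As background on why the helper returns an int rather than void (behaviour the patch preserves rather than changes): kref_put() returns nonzero only when the reference count drops to zero and the release callback runs, so the converted call sites can still tell whether the page was freed. A minimal sketch of that calling convention, mirroring the hunks above:

```c
/*
 * Illustration only, mirroring the converted call sites: a nonzero return
 * means the last reference was dropped and release_z3fold_page_locked()
 * already handled the page, so the caller must not touch it afterwards.
 */
if (put_z3fold_locked(zhdr))
	return;
/* Refcount is still nonzero: the caller still holds the page lock. */
z3fold_page_unlock(zhdr);
```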