| Message ID | 20240130014208.565554-11-hannes@cmpxchg.org (mailing list archive) |
| --- | --- |
| State | New |
| Series | mm: zswap: cleanups |

On Mon, Jan 29, 2024 at 5:42 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> The function ordering in zswap.c is a little chaotic, which requires
> jumping in unexpected directions when following related code. This is
> a series of patches that brings the file into the following order:
>
> - pool functions
> - lru functions
> - rbtree functions
> - zswap entry functions
> - compression/backend functions
> - writeback & shrinking functions
> - store, load, invalidate, swapon, swapoff
> - debugfs
> - init

This ordering seems reasonable to me. Then again, we don't have any
coherent ordering of functions, so anything would be an improvement :)

>
> But it has to be split up such that the moving still produces halfway
> readable diffs.
>
> In this patch, move pool allocation and freeing functions.
>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Reviewed-by: Nhat Pham <nphamcs@gmail.com>

> ---
>  mm/zswap.c | 297 +++++++++++++++++++++++++++--------------------------
>  1 file changed, 152 insertions(+), 145 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 082d076a758d..805d9a35f633 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -320,6 +320,158 @@ static void zswap_update_total_size(void)
>          zswap_pool_total_size = total;
>  }
>
> +/*********************************
> +* pool functions
> +**********************************/
> +
> +static void zswap_alloc_shrinker(struct zswap_pool *pool);
> +static void shrink_worker(struct work_struct *w);
> +
> +static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> +{
> +        int i;
> +        struct zswap_pool *pool;
> +        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
> +        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> +        int ret;
> +
> +        if (!zswap_has_pool) {
> +                /* if either are unset, pool initialization failed, and we
> +                 * need both params to be set correctly before trying to
> +                 * create a pool.
> +                 */
> +                if (!strcmp(type, ZSWAP_PARAM_UNSET))
> +                        return NULL;
> +                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
> +                        return NULL;
> +        }
> +
> +        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
> +        if (!pool)
> +                return NULL;
> +
> +        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
> +                /* unique name for each pool specifically required by zsmalloc */
> +                snprintf(name, 38, "zswap%x",
> +                         atomic_inc_return(&zswap_pools_count));
> +
> +                pool->zpools[i] = zpool_create_pool(type, name, gfp);
> +                if (!pool->zpools[i]) {
> +                        pr_err("%s zpool not available\n", type);
> +                        goto error;
> +                }
> +        }
> +        pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
> +
> +        strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
> +
> +        pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
> +        if (!pool->acomp_ctx) {
> +                pr_err("percpu alloc failed\n");
> +                goto error;
> +        }
> +
> +        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
> +                                       &pool->node);
> +        if (ret)
> +                goto error;
> +
> +        zswap_alloc_shrinker(pool);
> +        if (!pool->shrinker)
> +                goto error;
> +
> +        pr_debug("using %s compressor\n", pool->tfm_name);
> +
> +        /* being the current pool takes 1 ref; this func expects the
> +         * caller to always add the new pool as the current pool
> +         */
> +        kref_init(&pool->kref);
> +        INIT_LIST_HEAD(&pool->list);
> +        if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
> +                goto lru_fail;
> +        shrinker_register(pool->shrinker);
> +        INIT_WORK(&pool->shrink_work, shrink_worker);
> +        atomic_set(&pool->nr_stored, 0);
> +
> +        zswap_pool_debug("created", pool);
> +
> +        return pool;
> +
> +lru_fail:
> +        list_lru_destroy(&pool->list_lru);
> +        shrinker_free(pool->shrinker);
> +error:
> +        if (pool->acomp_ctx)
> +                free_percpu(pool->acomp_ctx);
> +        while (i--)
> +                zpool_destroy_pool(pool->zpools[i]);
> +        kfree(pool);
> +        return NULL;
> +}
> +
> +static struct zswap_pool *__zswap_pool_create_fallback(void)
> +{
> +        bool has_comp, has_zpool;
> +
> +        has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> +        if (!has_comp && strcmp(zswap_compressor,
> +                        CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
> +                pr_err("compressor %s not available, using default %s\n",
> +                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
> +                param_free_charp(&zswap_compressor);
> +                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
> +                has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> +        }
> +        if (!has_comp) {
> +                pr_err("default compressor %s not available\n",
> +                        zswap_compressor);
> +                param_free_charp(&zswap_compressor);
> +                zswap_compressor = ZSWAP_PARAM_UNSET;
> +        }
> +
> +        has_zpool = zpool_has_pool(zswap_zpool_type);
> +        if (!has_zpool && strcmp(zswap_zpool_type,
> +                        CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
> +                pr_err("zpool %s not available, using default %s\n",
> +                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
> +                param_free_charp(&zswap_zpool_type);
> +                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
> +                has_zpool = zpool_has_pool(zswap_zpool_type);
> +        }
> +        if (!has_zpool) {
> +                pr_err("default zpool %s not available\n",
> +                        zswap_zpool_type);
> +                param_free_charp(&zswap_zpool_type);
> +                zswap_zpool_type = ZSWAP_PARAM_UNSET;
> +        }
> +
> +        if (!has_comp || !has_zpool)
> +                return NULL;
> +
> +        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
> +}
> +
> +static void zswap_pool_destroy(struct zswap_pool *pool)
> +{
> +        int i;
> +
> +        zswap_pool_debug("destroying", pool);
> +
> +        shrinker_free(pool->shrinker);
> +        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
> +        free_percpu(pool->acomp_ctx);
> +        list_lru_destroy(&pool->list_lru);
> +
> +        spin_lock(&zswap_pools_lock);
> +        mem_cgroup_iter_break(NULL, pool->next_shrink);
> +        pool->next_shrink = NULL;
> +        spin_unlock(&zswap_pools_lock);
> +
> +        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
> +                zpool_destroy_pool(pool->zpools[i]);
> +        kfree(pool);
> +}
> +
>  /* should be called under RCU */
>  #ifdef CONFIG_MEMCG
>  static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
> @@ -970,151 +1122,6 @@ static void shrink_worker(struct work_struct *w)
>          zswap_pool_put(pool);
>  }
>
> -static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> -{
> -        int i;
> -        struct zswap_pool *pool;
> -        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
> -        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> -        int ret;
> -
> -        if (!zswap_has_pool) {
> -                /* if either are unset, pool initialization failed, and we
> -                 * need both params to be set correctly before trying to
> -                 * create a pool.
> -                 */
> -                if (!strcmp(type, ZSWAP_PARAM_UNSET))
> -                        return NULL;
> -                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
> -                        return NULL;
> -        }
> -
> -        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
> -        if (!pool)
> -                return NULL;
> -
> -        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
> -                /* unique name for each pool specifically required by zsmalloc */
> -                snprintf(name, 38, "zswap%x",
> -                         atomic_inc_return(&zswap_pools_count));
> -
> -                pool->zpools[i] = zpool_create_pool(type, name, gfp);
> -                if (!pool->zpools[i]) {
> -                        pr_err("%s zpool not available\n", type);
> -                        goto error;
> -                }
> -        }
> -        pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
> -
> -        strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
> -
> -        pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
> -        if (!pool->acomp_ctx) {
> -                pr_err("percpu alloc failed\n");
> -                goto error;
> -        }
> -
> -        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
> -                                       &pool->node);
> -        if (ret)
> -                goto error;
> -
> -        zswap_alloc_shrinker(pool);
> -        if (!pool->shrinker)
> -                goto error;
> -
> -        pr_debug("using %s compressor\n", pool->tfm_name);
> -
> -        /* being the current pool takes 1 ref; this func expects the
> -         * caller to always add the new pool as the current pool
> -         */
> -        kref_init(&pool->kref);
> -        INIT_LIST_HEAD(&pool->list);
> -        if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
> -                goto lru_fail;
> -        shrinker_register(pool->shrinker);
> -        INIT_WORK(&pool->shrink_work, shrink_worker);
> -        atomic_set(&pool->nr_stored, 0);
> -
> -        zswap_pool_debug("created", pool);
> -
> -        return pool;
> -
> -lru_fail:
> -        list_lru_destroy(&pool->list_lru);
> -        shrinker_free(pool->shrinker);
> -error:
> -        if (pool->acomp_ctx)
> -                free_percpu(pool->acomp_ctx);
> -        while (i--)
> -                zpool_destroy_pool(pool->zpools[i]);
> -        kfree(pool);
> -        return NULL;
> -}
> -
> -static struct zswap_pool *__zswap_pool_create_fallback(void)
> -{
> -        bool has_comp, has_zpool;
> -
> -        has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> -        if (!has_comp && strcmp(zswap_compressor,
> -                        CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
> -                pr_err("compressor %s not available, using default %s\n",
> -                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
> -                param_free_charp(&zswap_compressor);
> -                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
> -                has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
> -        }
> -        if (!has_comp) {
> -                pr_err("default compressor %s not available\n",
> -                        zswap_compressor);
> -                param_free_charp(&zswap_compressor);
> -                zswap_compressor = ZSWAP_PARAM_UNSET;
> -        }
> -
> -        has_zpool = zpool_has_pool(zswap_zpool_type);
> -        if (!has_zpool && strcmp(zswap_zpool_type,
> -                        CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
> -                pr_err("zpool %s not available, using default %s\n",
> -                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
> -                param_free_charp(&zswap_zpool_type);
> -                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
> -                has_zpool = zpool_has_pool(zswap_zpool_type);
> -        }
> -        if (!has_zpool) {
> -                pr_err("default zpool %s not available\n",
> -                        zswap_zpool_type);
> -                param_free_charp(&zswap_zpool_type);
> -                zswap_zpool_type = ZSWAP_PARAM_UNSET;
> -        }
> -
> -        if (!has_comp || !has_zpool)
> -                return NULL;
> -
> -        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
> -}
> -
> -static void zswap_pool_destroy(struct zswap_pool *pool)
> -{
> -        int i;
> -
> -        zswap_pool_debug("destroying", pool);
> -
> -        shrinker_free(pool->shrinker);
> -        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
> -        free_percpu(pool->acomp_ctx);
> -        list_lru_destroy(&pool->list_lru);
> -
> -        spin_lock(&zswap_pools_lock);
> -        mem_cgroup_iter_break(NULL, pool->next_shrink);
> -        pool->next_shrink = NULL;
> -        spin_unlock(&zswap_pools_lock);
> -
> -        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
> -                zpool_destroy_pool(pool->zpools[i]);
> -        kfree(pool);
> -}
> -
>  static int __must_check zswap_pool_get(struct zswap_pool *pool)
>  {
>          if (!pool)
> --
> 2.43.0
>
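
A rough sketch of the section layout zswap.c would end up with once the whole series lands, assuming the banner-comment style this patch introduces for the pool section; the other banner names are copied from the ordering list in the commit message and are illustrative only, not taken from the later patches:

```c
/* Illustrative outline only -- section names come from the series
 * description above; the exact banner wording in later patches may differ.
 */

/*********************************
* pool functions
**********************************/

/*********************************
* lru functions
**********************************/

/*********************************
* rbtree functions
**********************************/

/*********************************
* zswap entry functions
**********************************/

/*********************************
* compression/backend functions
**********************************/

/*********************************
* writeback & shrinking functions
**********************************/

/*********************************
* store, load, invalidate, swapon, swapoff
**********************************/

/*********************************
* debugfs
**********************************/

/*********************************
* init
**********************************/
```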
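
The moved code also shows the pool's lifetime handling: zswap_pool_create() does kref_init(&pool->kref) with "being the current pool takes 1 ref", and the hunk boundary exposes a __must_check zswap_pool_get(). Below is a minimal, generic sketch of that kref get/put/release pattern, using a hypothetical demo_pool object; it is not zswap's actual get/put/release code, which this patch does not touch:

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object, used only to illustrate the refcount pattern. */
struct demo_pool {
        struct kref kref;
        /* ... backing resources would live here ... */
};

/* Runs when the last reference is dropped. */
static void demo_pool_release(struct kref *kref)
{
        struct demo_pool *pool = container_of(kref, struct demo_pool, kref);

        /* Tear down backing resources here, then free the object. */
        kfree(pool);
}

/* Creation returns the object with its initial reference held. */
static struct demo_pool *demo_pool_create(void)
{
        struct demo_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        kref_init(&pool->kref);         /* refcount starts at 1 */
        return pool;
}

/* Succeeds only if the object is not already on its way out. */
static int __must_check demo_pool_get(struct demo_pool *pool)
{
        if (!pool)
                return 0;
        return kref_get_unless_zero(&pool->kref);
}

/* Drops a reference; demo_pool_release() runs when it reaches zero. */
static void demo_pool_put(struct demo_pool *pool)
{
        kref_put(&pool->kref, demo_pool_release);
}
```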