
[v2,2/2] mm/zswap: change zswap_pool kref to percpu_ref

Message ID 20240210-zswap-global-lru-v2-2-fbee3b11a62e@bytedance.com (mailing list archive)
State New
Series mm/zswap: optimize for dynamic zswap_pools

Commit Message

Chengming Zhou Feb. 14, 2024, 8:54 a.m. UTC
All zswap entries take a reference on the zswap_pool in zswap_store()
and drop it when they are freed. Switching the refcount from a kref to
a percpu_ref is better for scalability.

Although a percpu_ref uses a bit more memory, that should be fine for
our use case, since we almost always have only one zswap_pool in use.
The performance gain is on the zswap_store()/zswap_load() hot path.

Testing kernel build (32 threads) in tmpfs with memory.max=2GB, with
the zswap shrinker and writeback enabled, one 50GB swapfile, on a
128-CPU x86-64 machine; below is the average of 5 runs:

        mm-unstable  zswap-global-lru
real    63.20        63.12
user    1061.75      1062.95
sys     268.74       264.44

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zswap.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
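
For reference, a minimal sketch of the percpu_ref lifecycle this
conversion relies on. The demo_pool structure and helper names below are
illustrative stand-ins, not code from this patch; the real zswap code
additionally takes the pool off the zswap_pools list before scheduling
the release work:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_pool {
	struct percpu_ref ref;
	struct work_struct release_work;
};

/* Runs in process context once the pool has no references left. */
static void demo_pool_release_workfn(struct work_struct *work)
{
	struct demo_pool *pool = container_of(work, struct demo_pool,
					      release_work);

	percpu_ref_exit(&pool->ref);
	kfree(pool);
}

/* Called (possibly in atomic context) when the last reference is dropped. */
static void demo_pool_empty(struct percpu_ref *ref)
{
	struct demo_pool *pool = container_of(ref, struct demo_pool, ref);

	schedule_work(&pool->release_work);
}

static struct demo_pool *demo_pool_create(void)
{
	struct demo_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	INIT_WORK(&pool->release_work, demo_pool_release_workfn);

	/*
	 * Starts in percpu mode holding one implicit "initial" reference;
	 * PERCPU_REF_ALLOW_REINIT permits percpu_ref_resurrect() after a
	 * later percpu_ref_kill().
	 */
	if (percpu_ref_init(&pool->ref, demo_pool_empty,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(pool);
		return NULL;
	}
	return pool;
}

/* Hot path: only a per-CPU counter is touched until the ref is killed. */
static bool demo_pool_get(struct demo_pool *pool)
{
	return percpu_ref_tryget(&pool->ref);
}

static void demo_pool_put(struct demo_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

/* Retire the pool: drop the initial ref and switch to atomic mode. */
static void demo_pool_retire(struct demo_pool *pool)
{
	percpu_ref_kill(&pool->ref);
}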

Comments

Yosry Ahmed Feb. 14, 2024, 8:10 p.m. UTC | #1
On Wed, Feb 14, 2024 at 08:54:38AM +0000, Chengming Zhou wrote:
> All zswap entries take a reference on the zswap_pool in zswap_store()
> and drop it when they are freed. Switching the refcount from a kref to
> a percpu_ref is better for scalability.
>
> Although a percpu_ref uses a bit more memory, that should be fine for
> our use case, since we almost always have only one zswap_pool in use.
> The performance gain is on the zswap_store()/zswap_load() hot path.
>
> Testing kernel build (32 threads) in tmpfs with memory.max=2GB, with
> the zswap shrinker and writeback enabled, one 50GB swapfile, on a
> 128-CPU x86-64 machine; below is the average of 5 runs:
> 
>         mm-unstable  zswap-global-lru
> real    63.20        63.12
> user    1061.75      1062.95
> sys     268.74       264.44
> 
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> ---
>  mm/zswap.c | 31 ++++++++++++++++++++++---------
>  1 file changed, 22 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index dbff67d7e1c7..f6470d30d337 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
>  struct zswap_pool {
>  	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
>  	struct crypto_acomp_ctx __percpu *acomp_ctx;
> -	struct kref kref;
> +	struct percpu_ref ref;
>  	struct list_head list;
>  	struct work_struct release_work;
>  	struct hlist_node node;
> @@ -304,6 +304,7 @@ static void zswap_update_total_size(void)
>  /*********************************
>  * pool functions
>  **********************************/
> +static void __zswap_pool_empty(struct percpu_ref *ref);
>  
>  static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>  {
> @@ -357,13 +358,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>  	/* being the current pool takes 1 ref; this func expects the
>  	 * caller to always add the new pool as the current pool
>  	 */
> -	kref_init(&pool->kref);
> +	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
> +			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
> +	if (ret)
> +		goto ref_fail;
>  	INIT_LIST_HEAD(&pool->list);
>  
>  	zswap_pool_debug("created", pool);
>  
>  	return pool;
>  
> +ref_fail:
> +	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
>  error:
>  	if (pool->acomp_ctx)
>  		free_percpu(pool->acomp_ctx);
> @@ -436,8 +442,9 @@ static void __zswap_pool_release(struct work_struct *work)
>  
>  	synchronize_rcu();
>  
> -	/* nobody should have been able to get a kref... */
> -	WARN_ON(kref_get_unless_zero(&pool->kref));
> +	/* nobody should have been able to get a ref... */
> +	WARN_ON(percpu_ref_tryget(&pool->ref));

Just curious, was there any value from using kref_get_unless_zero() over
kref_read() here? If not, I think percpu_ref_is_zero() is more
intuitive. This also seems like it fits more as a debug check.
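
Concretely, something like this is what I have in mind (untested sketch):

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);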

> +	percpu_ref_exit(&pool->ref);
>  
>  	/* pool is now off zswap_pools list and has no references. */
>  	zswap_pool_destroy(pool);
> @@ -445,11 +452,11 @@ static void __zswap_pool_release(struct work_struct *work)
>  
>  static struct zswap_pool *zswap_pool_current(void);
>  
> -static void __zswap_pool_empty(struct kref *kref)
> +static void __zswap_pool_empty(struct percpu_ref *ref)
>  {
>  	struct zswap_pool *pool;
>  
> -	pool = container_of(kref, typeof(*pool), kref);
> +	pool = container_of(ref, typeof(*pool), ref);
>  
>  	spin_lock(&zswap_pools_lock);
>  
> @@ -468,12 +475,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
>  	if (!pool)
>  		return 0;
>  
> -	return kref_get_unless_zero(&pool->kref);
> +	return percpu_ref_tryget(&pool->ref);
>  }
>  
>  static void zswap_pool_put(struct zswap_pool *pool)
>  {
> -	kref_put(&pool->kref, __zswap_pool_empty);
> +	percpu_ref_put(&pool->ref);
>  }
>  
>  static struct zswap_pool *__zswap_pool_current(void)
> @@ -603,6 +610,12 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
>  
>  	if (!pool)
>  		pool = zswap_pool_create(type, compressor);
> +	else {
> +		/* Resurrect percpu_ref to percpu mode. */
> +		percpu_ref_resurrect(&pool->ref);

I think this is not very clear. The previous code relied on the ref from
zswap_pool_find_get() to replace the initial ref that we had dropped
before. This is not needed with percpu_ref_resurrect() because it
already restores the initial ref dropped by percpu_ref_kill().

Perhaps something like:
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */

, or am I overthinking this?
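
To spell out the ref accounting as I understand it (just a sketch in
comment form, not a suggested change):

	/*
	 * kref version, when re-enabling an old pool:
	 *   zswap_pool_put()       - initial ref was dropped when the pool
	 *                            was retired
	 *   zswap_pool_find_get()  - takes a new ref, which simply becomes
	 *                            the "current pool" ref
	 *
	 * percpu_ref version:
	 *   percpu_ref_kill()      - initial ref dropped when the pool was
	 *                            retired
	 *   zswap_pool_find_get()  - temporary ref via percpu_ref_tryget()
	 *   percpu_ref_resurrect() - restores the initial ref and switches
	 *                            back to percpu mode
	 *   zswap_pool_put()       - drops the temporary tryget ref
	 */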

> +		/* Drop the ref from zswap_pool_find_get(). */
> +		zswap_pool_put(pool);
> +	}
>  
>  	if (pool)
>  		ret = param_set_charp(s, kp);
> @@ -641,7 +654,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
>  	 * or the new pool we failed to add
>  	 */
>  	if (put_pool)
> -		zswap_pool_put(put_pool);
> +		percpu_ref_kill(&put_pool->ref);
>  
>  	return ret;
>  }
> 
> -- 
> b4 0.10.1
Chengming Zhou Feb. 16, 2024, 8:49 a.m. UTC | #2
On 2024/2/15 04:10, Yosry Ahmed wrote:
> On Wed, Feb 14, 2024 at 08:54:38AM +0000, Chengming Zhou wrote:
>> All zswap entries take a reference on the zswap_pool in zswap_store()
>> and drop it when they are freed. Switching the refcount from a kref to
>> a percpu_ref is better for scalability.
>>
>> Although a percpu_ref uses a bit more memory, that should be fine for
>> our use case, since we almost always have only one zswap_pool in use.
>> The performance gain is on the zswap_store()/zswap_load() hot path.
>>
>> Testing kernel build (32 threads) in tmpfs with memory.max=2GB, with
>> the zswap shrinker and writeback enabled, one 50GB swapfile, on a
>> 128-CPU x86-64 machine; below is the average of 5 runs:
>>
>>         mm-unstable  zswap-global-lru
>> real    63.20        63.12
>> user    1061.75      1062.95
>> sys     268.74       264.44
>>
>> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
>> ---
>>  mm/zswap.c | 31 ++++++++++++++++++++++---------
>>  1 file changed, 22 insertions(+), 9 deletions(-)
>>
>> diff --git a/mm/zswap.c b/mm/zswap.c
>> index dbff67d7e1c7..f6470d30d337 100644
>> --- a/mm/zswap.c
>> +++ b/mm/zswap.c
>> @@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
>>  struct zswap_pool {
>>  	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
>>  	struct crypto_acomp_ctx __percpu *acomp_ctx;
>> -	struct kref kref;
>> +	struct percpu_ref ref;
>>  	struct list_head list;
>>  	struct work_struct release_work;
>>  	struct hlist_node node;
>> @@ -304,6 +304,7 @@ static void zswap_update_total_size(void)
>>  /*********************************
>>  * pool functions
>>  **********************************/
>> +static void __zswap_pool_empty(struct percpu_ref *ref);
>>  
>>  static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>>  {
>> @@ -357,13 +358,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>>  	/* being the current pool takes 1 ref; this func expects the
>>  	 * caller to always add the new pool as the current pool
>>  	 */
>> -	kref_init(&pool->kref);
>> +	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
>> +			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
>> +	if (ret)
>> +		goto ref_fail;
>>  	INIT_LIST_HEAD(&pool->list);
>>  
>>  	zswap_pool_debug("created", pool);
>>  
>>  	return pool;
>>  
>> +ref_fail:
>> +	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
>>  error:
>>  	if (pool->acomp_ctx)
>>  		free_percpu(pool->acomp_ctx);
>> @@ -436,8 +442,9 @@ static void __zswap_pool_release(struct work_struct *work)
>>  
>>  	synchronize_rcu();
>>  
>> -	/* nobody should have been able to get a kref... */
>> -	WARN_ON(kref_get_unless_zero(&pool->kref));
>> +	/* nobody should have been able to get a ref... */
>> +	WARN_ON(percpu_ref_tryget(&pool->ref));
> 
> Just curious, was there any value from using kref_get_unless_zero() over
> kref_read() here? If not, I think percpu_ref_is_zero() is more
> intuitive. This also seems like it fits more as a debug check.

Agree, percpu_ref_is_zero() is better for debug.

> 
>> +	percpu_ref_exit(&pool->ref);
>>  
>>  	/* pool is now off zswap_pools list and has no references. */
>>  	zswap_pool_destroy(pool);
>> @@ -445,11 +452,11 @@ static void __zswap_pool_release(struct work_struct *work)
>>  
>>  static struct zswap_pool *zswap_pool_current(void);
>>  
>> -static void __zswap_pool_empty(struct kref *kref)
>> +static void __zswap_pool_empty(struct percpu_ref *ref)
>>  {
>>  	struct zswap_pool *pool;
>>  
>> -	pool = container_of(kref, typeof(*pool), kref);
>> +	pool = container_of(ref, typeof(*pool), ref);
>>  
>>  	spin_lock(&zswap_pools_lock);
>>  
>> @@ -468,12 +475,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
>>  	if (!pool)
>>  		return 0;
>>  
>> -	return kref_get_unless_zero(&pool->kref);
>> +	return percpu_ref_tryget(&pool->ref);
>>  }
>>  
>>  static void zswap_pool_put(struct zswap_pool *pool)
>>  {
>> -	kref_put(&pool->kref, __zswap_pool_empty);
>> +	percpu_ref_put(&pool->ref);
>>  }
>>  
>>  static struct zswap_pool *__zswap_pool_current(void)
>> @@ -603,6 +610,12 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
>>  
>>  	if (!pool)
>>  		pool = zswap_pool_create(type, compressor);
>> +	else {
>> +		/* Resurrect percpu_ref to percpu mode. */
>> +		percpu_ref_resurrect(&pool->ref);
> 
> I think this is not very clear. The previous code relied on the ref from
> zswap_pool_find_get() to replace the initial ref that we had dropped
> before. This is not needed with percpu_ref_resurrect() because it
> already restores the initial ref dropped by percpu_ref_kill().
> 
> Perhaps something like:
> 		/*
> 		 * Restore the initial ref dropped by percpu_ref_kill()
> 		 * when the pool was decommissioned and switch it again
> 		 * to percpu mode.
> 		 */
> 

Ok, will add this comment, it's clearer.

Thanks!

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index dbff67d7e1c7..f6470d30d337 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
 struct zswap_pool {
 	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
-	struct kref kref;
+	struct percpu_ref ref;
 	struct list_head list;
 	struct work_struct release_work;
 	struct hlist_node node;
@@ -304,6 +304,7 @@ static void zswap_update_total_size(void)
 /*********************************
 * pool functions
 **********************************/
+static void __zswap_pool_empty(struct percpu_ref *ref);
 
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
@@ -357,13 +358,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	/* being the current pool takes 1 ref; this func expects the
 	 * caller to always add the new pool as the current pool
 	 */
-	kref_init(&pool->kref);
+	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
+			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto ref_fail;
 	INIT_LIST_HEAD(&pool->list);
 
 	zswap_pool_debug("created", pool);
 
 	return pool;
 
+ref_fail:
+	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
@@ -436,8 +442,9 @@ static void __zswap_pool_release(struct work_struct *work)
 
 	synchronize_rcu();
 
-	/* nobody should have been able to get a kref... */
-	WARN_ON(kref_get_unless_zero(&pool->kref));
+	/* nobody should have been able to get a ref... */
+	WARN_ON(percpu_ref_tryget(&pool->ref));
+	percpu_ref_exit(&pool->ref);
 
 	/* pool is now off zswap_pools list and has no references. */
 	zswap_pool_destroy(pool);
@@ -445,11 +452,11 @@ static void __zswap_pool_release(struct work_struct *work)
 
 static struct zswap_pool *zswap_pool_current(void);
 
-static void __zswap_pool_empty(struct kref *kref)
+static void __zswap_pool_empty(struct percpu_ref *ref)
 {
 	struct zswap_pool *pool;
 
-	pool = container_of(kref, typeof(*pool), kref);
+	pool = container_of(ref, typeof(*pool), ref);
 
 	spin_lock(&zswap_pools_lock);
 
@@ -468,12 +475,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
 	if (!pool)
 		return 0;
 
-	return kref_get_unless_zero(&pool->kref);
+	return percpu_ref_tryget(&pool->ref);
 }
 
 static void zswap_pool_put(struct zswap_pool *pool)
 {
-	kref_put(&pool->kref, __zswap_pool_empty);
+	percpu_ref_put(&pool->ref);
 }
 
 static struct zswap_pool *__zswap_pool_current(void)
@@ -603,6 +610,12 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
+	else {
+		/* Resurrect percpu_ref to percpu mode. */
+		percpu_ref_resurrect(&pool->ref);
+		/* Drop the ref from zswap_pool_find_get(). */
+		zswap_pool_put(pool);
+	}
 
 	if (pool)
 		ret = param_set_charp(s, kp);
@@ -641,7 +654,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	 * or the new pool we failed to add
 	 */
 	if (put_pool)
-		zswap_pool_put(put_pool);
+		percpu_ref_kill(&put_pool->ref);
 
 	return ret;
 }