[v5,10/18] zsmalloc: factor out pool locking helpers

Message ID 20250212063153.179231-11-senozhatsky@chromium.org (mailing list archive)
State New
Series zsmalloc/zram: there be preemption

Commit Message

Sergey Senozhatsky Feb. 12, 2025, 6:27 a.m. UTC
We currently have a mix of migrate_{read,write}_lock() helpers
that lock zspages, but it's zs_pool that actually has a ->migrate_lock,
access to which is open-coded.  Factor out pool migrate locking into
helpers; the zspage migration locking API will be renamed to reduce
confusion.

It's worth mentioning that zsmalloc locks sync not only migration,
but also compaction.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 mm/zsmalloc.c | 63 +++++++++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 19 deletions(-)

Comments

Yosry Ahmed Feb. 12, 2025, 4:18 p.m. UTC | #1
On Wed, Feb 12, 2025 at 03:27:08PM +0900, Sergey Senozhatsky wrote:
> We currently have a mix of migrate_{read,write}_lock() helpers
> that lock zspages, but it's zs_pool that actually has a ->migrate_lock
> access to which is opene-coded.  Factor out pool migrate locking
> into helpers, zspage migration locking API will be renamed to
> reduce confusion.
> 
> It's worth mentioning that zsmalloc locks sync not only migration,
> but also compaction.
> 
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>

FWIW I don't see a lot of value in the helpers (renaming the lock is
useful tho). We open-code other locks like the class lock anyway, and
the helpers obscure the underlying lock type without adding much value
in terms of readability/conciseness.
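
To illustrate the point, a minimal self-contained sketch of the trade-off
(the toy_pool struct and toy_* names are made up for this example, not
taken from the patch): the open-coded form keeps the rwlock type visible
at the call site, while the helper hides it behind a pool-level name.

#include <linux/spinlock.h>

struct toy_pool {
	rwlock_t lock;
};

/* helper form: callers no longer see that this is an rwlock */
static inline void toy_pool_write_lock(struct toy_pool *pool)
{
	write_lock(&pool->lock);
}

static inline void toy_pool_write_unlock(struct toy_pool *pool)
{
	write_unlock(&pool->lock);
}

static void toy_caller(struct toy_pool *pool)
{
	/* open-coded: the lock type is explicit at the call site */
	write_lock(&pool->lock);
	write_unlock(&pool->lock);

	/* helper-based: reads as "lock the pool", the rwlock is hidden */
	toy_pool_write_lock(pool);
	toy_pool_write_unlock(pool);
}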

> [.. quoted patch trimmed; see the full diff in the Patch section below ..]
Yosry Ahmed Feb. 12, 2025, 4:19 p.m. UTC | #2
On Wed, Feb 12, 2025 at 04:18:14PM +0000, Yosry Ahmed wrote:
> On Wed, Feb 12, 2025 at 03:27:08PM +0900, Sergey Senozhatsky wrote:
> > We currently have a mix of migrate_{read,write}_lock() helpers
> > that lock zspages, but it's zs_pool that actually has a ->migrate_lock
> > access to which is opene-coded.  Factor out pool migrate locking
> > into helpers, zspage migration locking API will be renamed to
> > reduce confusion.
> > 
> > It's worth mentioning that zsmalloc locks sync not only migration,
> > but also compaction.
> > 
> > Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> 
> FWIW I don't see a lot of value in the helpers (renaming the lock is
> useful tho). We open-code other locks like the class lock anyway, and
> the helpers obscure the underlying lock type without adding much value
> in terms of readability/conciseness.

We use helpers for the class lock in the following change, but my point
stands for that too.

> 
> > [.. quoted patch trimmed; see the full diff in the Patch section below ..]
Sergey Senozhatsky Feb. 13, 2025, 12:57 a.m. UTC | #3
On (25/02/12 16:18), Yosry Ahmed wrote:
> On Wed, Feb 12, 2025 at 03:27:08PM +0900, Sergey Senozhatsky wrote:
> > We currently have a mix of migrate_{read,write}_lock() helpers
> > that lock zspages, but it's zs_pool that actually has a ->migrate_lock
> > access to which is opene-coded.  Factor out pool migrate locking
> > into helpers, zspage migration locking API will be renamed to
> > reduce confusion.
> > 
> > It's worth mentioning that zsmalloc locks sync not only migration,
> > but also compaction.
> > 
> > Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> 
> FWIW I don't see a lot of value in the helpers (renaming the lock is
> useful tho).

I want to hide the details, keep them in one place and at some
point *in the future* have the same "locking rules" as for zspage
lock.  Also *possibly* throwing a couple of lockdep assertions.
So I'd prefer to abstract all of these.
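
For illustration, a rough sketch of the kind of assertion such helpers
could grow (assuming a simplified pool struct and lockdep being enabled;
this is an illustration of the idea, not code from the series):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct toy_pool {
	rwlock_t lock;
};

static inline void toy_pool_write_lock(struct toy_pool *pool)
{
	write_lock(&pool->lock);
}

static inline void toy_pool_write_unlock(struct toy_pool *pool)
{
	write_unlock(&pool->lock);
}

/*
 * With the lock hidden behind helpers, functions that rely on the pool
 * write lock being held can assert it in one place (compiles away when
 * lockdep is disabled).
 */
static inline void toy_assert_pool_write_locked(struct toy_pool *pool)
{
	lockdep_assert_held_write(&pool->lock);
}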
Yosry Ahmed Feb. 13, 2025, 1:12 a.m. UTC | #4
February 12, 2025 at 4:57 PM, "Sergey Senozhatsky" <senozhatsky@chromium.org> wrote:

> On (25/02/12 16:18), Yosry Ahmed wrote:
> > On Wed, Feb 12, 2025 at 03:27:08PM +0900, Sergey Senozhatsky wrote:
> > > We currently have a mix of migrate_{read,write}_lock() helpers
> > > that lock zspages, but it's zs_pool that actually has a ->migrate_lock
> > > access to which is opene-coded.  Factor out pool migrate locking
> > > into helpers, zspage migration locking API will be renamed to
> > > reduce confusion.
> > >
> > > It's worth mentioning that zsmalloc locks sync not only migration,
> > > but also compaction.
> > >
> > > Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> >
> > FWIW I don't see a lot of value in the helpers (renaming the lock is
> > useful tho).
>
> I want to hide the details, keep them in one place and at some
> point *in the future* have the same "locking rules" as for zspage
> lock.  Also *possibly* throwing a couple of lockdep assertions.
> So I'd prefer to abstract all of these.

I'd prefer to introduce the abstractions when they are needed tbh. Right
now they just make the code less readable.
Sergey Senozhatsky Feb. 13, 2025, 2:54 a.m. UTC | #5
On (25/02/13 01:12), Yosry Ahmed wrote:
> > I want to hide the details, keep them in one place and at some
> > point *in the future* have the same "locking rules" as for zspage
> > lock.  Also *possibly* throwing a couple of lockdep assertions.
> > So I'd prefer to abstract all of these.
>
> I'd prefer to introduce the abstractions when they are needed tbh. Right
> now they just make the code less readable.

OK, gone now.  I think I didn't screw things up resolving the conflicts.

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d0e47f7ae33..47c638df47c5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -18,7 +18,7 @@ 
 /*
  * lock ordering:
  *	page_lock
- *	pool->migrate_lock
+ *	pool->lock
  *	class->lock
  *	zspage->lock
  */
@@ -224,10 +224,35 @@  struct zs_pool {
 	struct work_struct free_work;
 #endif
 	/* protect page/zspage migration */
-	rwlock_t migrate_lock;
+	rwlock_t lock;
 	atomic_t compaction_in_progress;
 };
 
+static void pool_write_unlock(struct zs_pool *pool)
+{
+	write_unlock(&pool->lock);
+}
+
+static void pool_write_lock(struct zs_pool *pool)
+{
+	write_lock(&pool->lock);
+}
+
+static void pool_read_unlock(struct zs_pool *pool)
+{
+	read_unlock(&pool->lock);
+}
+
+static void pool_read_lock(struct zs_pool *pool)
+{
+	read_lock(&pool->lock);
+}
+
+static bool pool_lock_is_contended(struct zs_pool *pool)
+{
+	return rwlock_is_contended(&pool->lock);
+}
+
 static inline void zpdesc_set_first(struct zpdesc *zpdesc)
 {
 	SetPagePrivate(zpdesc_page(zpdesc));
@@ -1206,7 +1231,7 @@  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	BUG_ON(in_interrupt());
 
 	/* It guarantees it can get zspage from handle safely */
-	read_lock(&pool->migrate_lock);
+	pool_read_lock(pool);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zpdesc, &obj_idx);
 	zspage = get_zspage(zpdesc);
@@ -1218,7 +1243,7 @@  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	 * which is smaller granularity.
 	 */
 	migrate_read_lock(zspage);
-	read_unlock(&pool->migrate_lock);
+	pool_read_unlock(pool);
 
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
@@ -1450,16 +1475,16 @@  void zs_free(struct zs_pool *pool, unsigned long handle)
 		return;
 
 	/*
-	 * The pool->migrate_lock protects the race with zpage's migration
+	 * The pool->lock protects the race with zpage's migration
 	 * so it's safe to get the page from handle.
 	 */
-	read_lock(&pool->migrate_lock);
+	pool_read_lock(pool);
 	obj = handle_to_obj(handle);
 	obj_to_zpdesc(obj, &f_zpdesc);
 	zspage = get_zspage(f_zpdesc);
 	class = zspage_class(pool, zspage);
 	spin_lock(&class->lock);
-	read_unlock(&pool->migrate_lock);
+	pool_read_unlock(pool);
 
 	class_stat_sub(class, ZS_OBJS_INUSE, 1);
 	obj_free(class->size, obj);
@@ -1793,10 +1818,10 @@  static int zs_page_migrate(struct page *newpage, struct page *page,
 	pool = zspage->pool;
 
 	/*
-	 * The pool migrate_lock protects the race between zpage migration
+	 * The pool lock protects the race between zpage migration
 	 * and zs_free.
 	 */
-	write_lock(&pool->migrate_lock);
+	pool_write_lock(pool);
 	class = zspage_class(pool, zspage);
 
 	/*
@@ -1833,7 +1858,7 @@  static int zs_page_migrate(struct page *newpage, struct page *page,
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release migration_lock.
 	 */
-	write_unlock(&pool->migrate_lock);
+	pool_write_unlock(pool);
 	spin_unlock(&class->lock);
 	migrate_write_unlock(zspage);
 
@@ -1956,7 +1981,7 @@  static unsigned long __zs_compact(struct zs_pool *pool,
 	 * protect the race between zpage migration and zs_free
 	 * as well as zpage allocation/free
 	 */
-	write_lock(&pool->migrate_lock);
+	pool_write_lock(pool);
 	spin_lock(&class->lock);
 	while (zs_can_compact(class)) {
 		int fg;
@@ -1983,14 +2008,14 @@  static unsigned long __zs_compact(struct zs_pool *pool,
 		src_zspage = NULL;
 
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
-		    || rwlock_is_contended(&pool->migrate_lock)) {
+		    || pool_lock_is_contended(pool)) {
 			putback_zspage(class, dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&class->lock);
-			write_unlock(&pool->migrate_lock);
+			pool_write_unlock(pool);
 			cond_resched();
-			write_lock(&pool->migrate_lock);
+			pool_write_lock(pool);
 			spin_lock(&class->lock);
 		}
 	}
@@ -2002,7 +2027,7 @@  static unsigned long __zs_compact(struct zs_pool *pool,
 		putback_zspage(class, dst_zspage);
 
 	spin_unlock(&class->lock);
-	write_unlock(&pool->migrate_lock);
+	pool_write_unlock(pool);
 
 	return pages_freed;
 }
@@ -2014,10 +2039,10 @@  unsigned long zs_compact(struct zs_pool *pool)
 	unsigned long pages_freed = 0;
 
 	/*
-	 * Pool compaction is performed under pool->migrate_lock so it is basically
+	 * Pool compaction is performed under pool->lock so it is basically
 	 * single-threaded. Having more than one thread in __zs_compact()
-	 * will increase pool->migrate_lock contention, which will impact other
-	 * zsmalloc operations that need pool->migrate_lock.
+	 * will increase pool->lock contention, which will impact other
+	 * zsmalloc operations that need pool->lock.
 	 */
 	if (atomic_xchg(&pool->compaction_in_progress, 1))
 		return 0;
@@ -2139,7 +2164,7 @@  struct zs_pool *zs_create_pool(const char *name)
 		return NULL;
 
 	init_deferred_free(pool);
-	rwlock_init(&pool->migrate_lock);
+	rwlock_init(&pool->lock);
 	atomic_set(&pool->compaction_in_progress, 0);
 
 	pool->name = kstrdup(name, GFP_KERNEL);