[v8,21/21] mm/zsmalloc: update comments for page->zpdesc changes

Message ID: 20241205175000.3187069-22-willy@infradead.org (mailing list archive)
State: New
Series: Add zpdesc memory descriptor for zswap.zpool

Commit Message

Matthew Wilcox (Oracle) Dec. 5, 2024, 5:49 p.m. UTC
From: Alex Shi <alexs@kernel.org>

After the page to zpdesc conversion, a few comments and one function
name still refer to page rather than zpdesc. Update the comments and
rename create_page_chain() to create_zpdesc_chain().

Signed-off-by: Alex Shi <alexs@kernel.org>
---
 mm/zsmalloc.c | 61 ++++++++++++++++++++++++++-------------------------
 1 file changed, 31 insertions(+), 30 deletions(-)

Comments

Hyeonggon Yoo Dec. 10, 2024, 3:46 p.m. UTC | #1
On Thu, Dec 05, 2024 at 05:49:58PM +0000, Matthew Wilcox (Oracle) wrote:
> From: Alex Shi <alexs@kernel.org>
>
> After the page to zpdesc conversion, a few comments and one function
> name still refer to page rather than zpdesc. Update the comments and
> rename create_page_chain() to create_zpdesc_chain().

Talking about updating comments and code by replacing 'page' with 'zpdesc',
I'm not sure if it makes sense to replace all instances of 'page' with 'zpdesc'.
A zpdesc is a descriptor, not a page that contains data (at least that's
what I have been thinking while writing the initial patch series).

In that context I'm still not sure if saying "a sub-zpdesc of a zspage", "lock zpdesc",
or "migrate zpdesc" makes sense because replacing 'page descriptor (aka. struct page)' with
'zpool descriptor (aka. zpdesc)' doesn't mean zsmalloc is throwing away the concept of pages.

(...Or I might be thinking about this the wrong way)
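
For reference, here is a rough sketch of the descriptor fields this thread
keeps referring to, pieced together from the comments quoted below. Treat it
as an assumption: the authoritative definition lives in mm/zpdesc.h, and the
exact types and layout there may differ.

	/* Sketch only -- see mm/zpdesc.h for the real definition. */
	struct zpdesc {
		unsigned long flags;		/* PG_private marks the first component */
		union {
			struct zpdesc *next;	/* links component zpdescs of a zspage */
			unsigned long handle;	/* huge classes store the handle here */
		};
		struct zspage *zspage;		/* points back to the owning zspage */
		unsigned int first_obj_offset;	/* lower 24 bits: first object offset */
	};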

> Signed-off-by: Alex Shi <alexs@kernel.org>
> ---
>  mm/zsmalloc.c | 61 ++++++++++++++++++++++++++-------------------------
>  1 file changed, 31 insertions(+), 30 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index c0e7c055847a..1f5ff0fdeb42 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -15,20 +15,19 @@
>
>  /*
>   * Following is how we use various fields and flags of underlying
> - * struct page(s) to form a zspage.
> + * struct zpdesc(page) to form a zspage.
>   *
> - * Usage of struct page fields:
> - *	page->private: points to zspage
> - *	page->index: links together all component pages of a zspage
> + * Usage of struct zpdesc fields:
> + *	zpdesc->zspage: points to zspage
> + *	zpdesc->next: links together all component zpdescs of a zspage
>   *		For the huge page, this is always 0, so we use this field
>   *		to store handle.
> - *	page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
> - *		offset in a subpage of a zspage
> - *
> - * Usage of struct page flags:
> - *	PG_private: identifies the first component page
> - *	PG_owner_priv_1: identifies the huge component page
> + *	zpdesc->first_obj_offset: PGTY_zsmalloc, lower 24 bits locate the first
> + *		object offset in a subpage of a zspage
>   *
> + * Usage of struct zpdesc(page) flags:
> + *	PG_private: identifies the first component zpdesc
> + *	PG_lock: lock all component zpdescs for a zspage free, serialize with
>   */

I think this comment is unnecessary, as it's already documented in mm/zpdesc.h.
It can be removed in patch 01.
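
The only subtle thing in that block is the 24-bit packing of the first object
offset into the word shared with the page type. A sketch of what the accessor
does, assuming the offset sits in the low 24 bits (the mask name here is
hypothetical; the real helpers are get_first_obj_offset()/set_first_obj_offset()):

	/* Assumption: the low 24 bits hold the offset, the upper bits hold
	 * the PGTY_zsmalloc page type tag. */
	#define FIRST_OBJ_OFFSET_MASK	0xffffff	/* hypothetical name */

	static unsigned int get_first_obj_offset_sketch(const struct zpdesc *zpdesc)
	{
		return zpdesc->first_obj_offset & FIRST_OBJ_OFFSET_MASK;
	}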

>  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> @@ -194,7 +193,10 @@ struct size_class {
>  	 */
>  	int size;
>  	int objs_per_zspage;
> -	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
> +	/*
> +	 * Number of PAGE_SIZE sized zpdescs/pages to combine to
> +	 * form a 'zspage'
> +	 */
>  	int pages_per_zspage;
>
>  	unsigned int index;
> @@ -908,7 +910,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
>
>  	/*
>  	 * Since zs_free couldn't be sleepable, this function cannot call
> -	 * lock_page. The page locks trylock_zspage got will be released
> +	 * lock_page. The zpdesc locks trylock_zspage got will be released
>  	 * by __free_zspage.
>  	 */
>  	if (!trylock_zspage(zspage)) {
> @@ -965,7 +967,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
>  	set_freeobj(zspage, 0);
>  }
>
> -static void create_page_chain(struct size_class *class, struct zspage *zspage,
> +static void create_zpdesc_chain(struct size_class *class, struct zspage *zspage,
>  				struct zpdesc *zpdescs[])
>  {
>  	int i;
> @@ -974,9 +976,9 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
>  	int nr_zpdescs = class->pages_per_zspage;
>
>  	/*
> -	 * Allocate individual pages and link them together as:
> -	 * 1. all pages are linked together using zpdesc->next
> -	 * 2. each sub-page point to zspage using zpdesc->zspage
> +	 * Allocate individual zpdescs and link them together as:
> +	 * 1. all zpdescs are linked together using zpdesc->next
> +	 * 2. each sub-zpdesc point to zspage using zpdesc->zspage
>  	 *
>  	 * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
>  	 * has this flag set).
> @@ -1034,7 +1036,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>  		zpdescs[i] = zpdesc;
>  	}
>
> -	create_page_chain(class, zspage, zpdescs);
> +	create_zpdesc_chain(class, zspage, zpdescs);
>  	init_zspage(class, zspage);
>  	zspage->pool = pool;
>  	zspage->class = class->index;
> @@ -1351,7 +1353,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
>  		/* record handle in the header of allocated chunk */
>  		link->handle = handle | OBJ_ALLOCATED_TAG;
>  	else
> -		/* record handle to page->index */
> +		/* record handle to zpdesc->handle */
>  		zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;

the name of the field is already 'handle',
so we don't need a comment to explain it.
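
If anything, the only part worth noting there is the tag: assuming
OBJ_ALLOCATED_TAG is a low-bit flag (as it is used throughout this file),
the stored word round-trips like this:

	/* Assumption: OBJ_ALLOCATED_TAG occupies low bit(s) of the handle
	 * word, so it can simply be masked off again on lookup. */
	zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
	handle = zspage->first_zpdesc->handle & ~OBJ_ALLOCATED_TAG;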

>  	kunmap_local(vaddr);
> @@ -1441,7 +1443,6 @@ static void obj_free(int class_size, unsigned long obj)
>  	unsigned int f_objidx;
>  	void *vaddr;
>
> -
>  	obj_to_location(obj, &f_zpdesc, &f_objidx);
>  	f_offset = offset_in_page(class_size * f_objidx);
>  	zspage = get_zspage(f_zpdesc);
> @@ -1684,19 +1685,19 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
>  #ifdef CONFIG_COMPACTION
>  /*
>   * To prevent zspage destroy during migration, zspage freeing should
> - * hold locks of all pages in the zspage.
> + * hold locks of all component zpdesc in the zspage.
>   */
>  static void lock_zspage(struct zspage *zspage)
>  {
>  	struct zpdesc *curr_zpdesc, *zpdesc;
>
>  	/*
> -	 * Pages we haven't locked yet can be migrated off the list while we're
> +	 * Zpdesc we haven't locked yet can be migrated off the list while we're
>  	 * trying to lock them, so we need to be careful and only attempt to
> -	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
> -	 * may no longer belong to the zspage. This means that we may wait for
> -	 * the wrong page to unlock, so we must take a reference to the page
> -	 * prior to waiting for it to unlock outside migrate_read_lock().
> +	 * lock each zpdesc under migrate_read_lock(). Otherwise, the zpdesc we
> +	 * lock may no longer belong to the zspage. This means that we may wait
> +	 * for the wrong zpdesc to unlock, so we must take a reference to the
> +	 * zpdesc prior to waiting for it to unlock outside migrate_read_lock().
>  	 */
>  	while (1) {
>  		migrate_read_lock(zspage);
> @@ -1771,7 +1772,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
>  		idx++;
>  	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
>
> -	create_page_chain(class, zspage, zpdescs);
> +	create_zpdesc_chain(class, zspage, zpdescs);
>  	first_obj_offset = get_first_obj_offset(oldzpdesc);
>  	set_first_obj_offset(newzpdesc, first_obj_offset);
>  	if (unlikely(ZsHugePage(zspage)))
> @@ -1782,8 +1783,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
>  static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
>  {
>  	/*
> -	 * Page is locked so zspage couldn't be destroyed. For detail, look at
> -	 * lock_zspage in free_zspage.
> +	 * Page/zpdesc is locked so zspage couldn't be destroyed. For detail,
> +	 * look at lock_zspage in free_zspage.
>  	 */
>  	VM_BUG_ON_PAGE(PageIsolated(page), page);
>
> @@ -1810,7 +1811,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
>  	/* We're committed, tell the world that this is a Zsmalloc page. */
>  	__zpdesc_set_zsmalloc(newzpdesc);
>
> -	/* The page is locked, so this pointer must remain valid */
> +	/* The zpdesc/page is locked, so this pointer must remain valid */
>  	zspage = get_zspage(zpdesc);
>  	pool = zspage->pool;
>
> @@ -1883,7 +1884,7 @@ static const struct movable_operations zsmalloc_mops = {
>  };
>
>  /*
> - * Caller should hold page_lock of all pages in the zspage
> + * Caller should hold zpdesc locks of all in the zspage
>   * In here, we cannot use zspage meta data.
>   */
>  static void async_free_zspage(struct work_struct *work)
> --
> 2.45.2
>
>
Matthew Wilcox (Oracle) Dec. 11, 2024, 4:37 a.m. UTC | #2
On Wed, Dec 11, 2024 at 12:46:43AM +0900, Hyeonggon Yoo wrote:
> Talking about updating comments and code by replacing 'page' with 'zpdesc',
> I'm not sure if it makes sense to replace all instances of 'page' with 'zpdesc'.
> A zpdesc is a descriptor, not a page that contains data (at least that's
> what I have been thinking while writing the initial patch series).

Agreed.

> In that context I'm still not sure if saying "a sub-zpdesc of a zspage", "lock zpdesc",
> or "migrate zpdesc" makes sense because replacing 'page descriptor (aka. struct page)' with
> 'zpool descriptor (aka. zpdesc)' doesn't mean zsmalloc is throwing away the concept of pages.

sub-zpdesc is a silly thing to say.  subpage is equally silly.  If it's
a page, call it a page.

However, locking the zpdesc does make sense -- we're locking the
descriptor.
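
And the retry dance in lock_zspage() reads naturally in descriptor terms. A
minimal sketch of that loop, where zpdesc_trylock(), zpdesc_wait_locked(),
zpdesc_get(), zpdesc_put() and get_first_zpdesc() are hypothetical stand-ins
for the corresponding page-level lock/ref helpers:

	/* Sketch of lock_zspage()'s loop; the zpdesc_*() lock/ref helpers
	 * are hypothetical stand-ins, not necessarily the real API. */
	static void lock_zspage_sketch(struct zspage *zspage)
	{
		struct zpdesc *curr, *zpdesc;

		/* Lock the first descriptor, retrying if it migrates away. */
		while (1) {
			migrate_read_lock(zspage);
			zpdesc = get_first_zpdesc(zspage);
			if (zpdesc_trylock(zpdesc))
				break;
			/* Pin it so we wait on the right descriptor, and
			 * wait outside migrate_read_lock(). */
			zpdesc_get(zpdesc);
			migrate_read_unlock(zspage);
			zpdesc_wait_locked(zpdesc);
			zpdesc_put(zpdesc);
		}

		/* Walk and lock the remaining component descriptors. */
		curr = zpdesc;
		while ((zpdesc = get_next_zpdesc(curr)) != NULL) {
			if (zpdesc_trylock(zpdesc)) {
				curr = zpdesc;
			} else {
				zpdesc_get(zpdesc);
				migrate_read_unlock(zspage);
				zpdesc_wait_locked(zpdesc);
				zpdesc_put(zpdesc);
				migrate_read_lock(zspage);
			}
		}
		migrate_read_unlock(zspage);
	}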

> >  /*
> >   * Following is how we use various fields and flags of underlying
> > - * struct page(s) to form a zspage.
> > + * struct zpdesc(page) to form a zspage.
> >   *
> > - * Usage of struct page fields:
> > - *	page->private: points to zspage
> > - *	page->index: links together all component pages of a zspage
> > + * Usage of struct zpdesc fields:
> > + *	zpdesc->zspage: points to zspage
> > + *	zpdesc->next: links together all component zpdescs of a zspage
> >   *		For the huge page, this is always 0, so we use this field
> >   *		to store handle.
> > - *	page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
> > - *		offset in a subpage of a zspage
> > - *
> > - * Usage of struct page flags:
> > - *	PG_private: identifies the first component page
> > - *	PG_owner_priv_1: identifies the huge component page
> > + *	zpdesc->first_obj_offset: PGTY_zsmalloc, lower 24 bits locate the first
> > + *		object offset in a subpage of a zspage
> >   *
> > + * Usage of struct zpdesc(page) flags:
> > + *	PG_private: identifies the first component zpdesc
> > + *	PG_lock: lock all component zpdescs for a zspage free, serialize with
> >   */
> 
> I think this comment is unnecessary, as it's already documented in mm/zpdesc.h.
> It can be removed in patch 01.

Agreed.  I did think about doing that, so if you want to do it too,
that's two votes for doing it.

> > @@ -1351,7 +1353,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
> >  		/* record handle in the header of allocated chunk */
> >  		link->handle = handle | OBJ_ALLOCATED_TAG;
> >  	else
> > -		/* record handle to page->index */
> > +		/* record handle to zpdesc->handle */
> >  		zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
> 
> the name of the field is already 'handle',
> so we don't need a comment to explain it.

Agreed.

Do you want to take on producing v9 or do you want me to fold in your
suggestions and send it?
Hyeonggon Yoo Dec. 12, 2024, 12:11 a.m. UTC | #3
On Wed, Dec 11, 2024 at 1:37 PM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Wed, Dec 11, 2024 at 12:46:43AM +0900, Hyeonggon Yoo wrote:
> > Talking about updating comments and code by replacing 'page' with 'zpdesc',
> > I'm not sure if it makes sense to replace all instances of 'page' with 'zpdesc'.
> > A zpdesc is a descriptor, not a page that contains data (at least that's
> > what I have been thinking while writing the initial patch series).
>
> Agreed.
>
> > In that context I'm still not sure if saying "a sub-zpdesc of a zspage", "lock zpdesc",
> > or "migrate zpdesc" makes sense because replacing 'page descriptor (aka. struct page)' with
> > 'zpool descriptor (aka. zpdesc)' doesn't mean zsmalloc is throwing away the concept of pages.
>
> sub-zpdesc is a silly thing to say.  subpage is equally silly.  If it's
> a page, call it a page.

Agreed.

> However, locking the zpdesc does make sense -- we're locking the
> descriptor.

That makes sense.

> > >  /*
> > >   * Following is how we use various fields and flags of underlying
> > > - * struct page(s) to form a zspage.
> > > + * struct zpdesc(page) to form a zspage.
> > >   *
> > > - * Usage of struct page fields:
> > > - * page->private: points to zspage
> > > - * page->index: links together all component pages of a zspage
> > > + * Usage of struct zpdesc fields:
> > > + * zpdesc->zspage: points to zspage
> > > + * zpdesc->next: links together all component zpdescs of a zspage
> > >   *         For the huge page, this is always 0, so we use this field
> > >   *         to store handle.
> > > - * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
> > > - *         offset in a subpage of a zspage
> > > - *
> > > - * Usage of struct page flags:
> > > - * PG_private: identifies the first component page
> > > - * PG_owner_priv_1: identifies the huge component page
> > > + * zpdesc->first_obj_offset: PGTY_zsmalloc, lower 24 bits locate the first
> > > + *         object offset in a subpage of a zspage
> > >   *
> > > + * Usage of struct zpdesc(page) flags:
> > > + * PG_private: identifies the first component zpdesc
> > > + * PG_lock: lock all component zpdescs for a zspage free, serialize with
> > >   */
> >
> > I think this comment is unnecessary, as it's already documented in mm/zpdesc.h.
> > It can be removed in patch 01.
>
> Agreed.  I did think about doing that, so if you want to do it too,
> that's two votes for doing it.

Will do in v9.

> > > @@ -1351,7 +1353,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
> > >             /* record handle in the header of allocated chunk */
> > >             link->handle = handle | OBJ_ALLOCATED_TAG;
> > >     else
> > > -           /* record handle to page->index */
> > > +           /* record handle to zpdesc->handle */
> > >             zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
> >
> > the name of the field is already 'handle',
> > so we don't need a comment to explain it.
>
> Agreed.
>
> Do you want to take on producing v9 or do you want me to fold in your
> suggestions and send it?

I will send v9 with my feedback adjusted this weekend.
Thank you for rebasing and pushing this forward.

Best,
Hyeonggon
Alex Shi Dec. 13, 2024, 2:25 a.m. UTC | #4
Hyeonggon Yoo <42.hyeyoo@gmail.com> wrote on Thu, Dec 12, 2024 at 08:12:
>
> On Wed, Dec 11, 2024 at 1:37 PM Matthew Wilcox <willy@infradead.org> wrote:
> >
> > On Wed, Dec 11, 2024 at 12:46:43AM +0900, Hyeonggon Yoo wrote:
> > > Talking about updating comments and code by replacing 'page' with 'zpdesc',
> > > I'm not sure if it makes sense to replace all instances of 'page' with 'zpdesc'.
> > > A zpdesc is a descriptor, not a page that contains data (at least that's
> > > what I have been thinking while writing the initial patch series).
> >
> > Agreed.
> >
> > > In that context I'm still not sure if saying "a sub-zpdesc of a zspage", "lock zpdesc",
> > > or "migrate zpdesc" makes sense because replacing 'page descriptor (aka. struct page)' with
> > > 'zpool descriptor (aka. zpdesc)' doesn't mean zsmalloc is throwing away the concept of pages.
> >
> > sub-zpdesc is a silly thing to say.  subpage is equally silly.  If it's
> > a page, call it a page.
>
> Agreed.
>
> > However, locking the zpdesc does make sense -- we're locking the
> > descriptor.
>
> That makes sense.
>
> > > >  /*
> > > >   * Following is how we use various fields and flags of underlying
> > > > - * struct page(s) to form a zspage.
> > > > + * struct zpdesc(page) to form a zspage.
> > > >   *
> > > > - * Usage of struct page fields:
> > > > - * page->private: points to zspage
> > > > - * page->index: links together all component pages of a zspage
> > > > + * Usage of struct zpdesc fields:
> > > > + * zpdesc->zspage: points to zspage
> > > > + * zpdesc->next: links together all component zpdescs of a zspage
> > > >   *         For the huge page, this is always 0, so we use this field
> > > >   *         to store handle.
> > > > - * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
> > > > - *         offset in a subpage of a zspage
> > > > - *
> > > > - * Usage of struct page flags:
> > > > - * PG_private: identifies the first component page
> > > > - * PG_owner_priv_1: identifies the huge component page
> > > > + * zpdesc->first_obj_offset: PGTY_zsmalloc, lower 24 bits locate the first
> > > > + *         object offset in a subpage of a zspage
> > > >   *
> > > > + * Usage of struct zpdesc(page) flags:
> > > > + * PG_private: identifies the first component zpdesc
> > > > + * PG_lock: lock all component zpdescs for a zspage free, serialize with
> > > >   */
> > >
> > > I think this comment is unnecessary, as it's already documented in mm/zpdesc.h.
> > > It can be removed in patch 01.
> >
> > Agreed.  I did think about doing that, so if you want to do it too,
> > that's two votes for doing it.
>
> Will do in v9.
>
> > > > @@ -1351,7 +1353,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
> > > >             /* record handle in the header of allocated chunk */
> > > >             link->handle = handle | OBJ_ALLOCATED_TAG;
> > > >     else
> > > > -           /* record handle to page->index */
> > > > +           /* record handle to zpdesc->handle */
> > > >             zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
> > >
> > > the name of the field is already 'handle',
> > > so we don't need a comment to explain it.
> >
> > Agreed.
> >
> > Do you want to take on producing v9 or do you want me to fold in your
> > suggestions and send it?
>
> I will send v9 with my feedback adjusted this weekend.
> Thank you for rebasing and pushing this forward.

Very glad to see this being pushed forward. I have recently been changing
careers, so I'm sorry for not putting more effort into it.

Thanks a lot, you guys!

>
> Best,
> Hyeonggon

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c0e7c055847a..1f5ff0fdeb42 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -15,20 +15,19 @@ 
 
 /*
  * Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
+ * struct zpdesc(page) to form a zspage.
  *
- * Usage of struct page fields:
- *	page->private: points to zspage
- *	page->index: links together all component pages of a zspage
+ * Usage of struct zpdesc fields:
+ *	zpdesc->zspage: points to zspage
+ *	zpdesc->next: links together all component zpdescs of a zspage
  *		For the huge page, this is always 0, so we use this field
  *		to store handle.
- *	page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
- *		offset in a subpage of a zspage
- *
- * Usage of struct page flags:
- *	PG_private: identifies the first component page
- *	PG_owner_priv_1: identifies the huge component page
+ *	zpdesc->first_obj_offset: PGTY_zsmalloc, lower 24 bits locate the first
+ *		object offset in a subpage of a zspage
  *
+ * Usage of struct zpdesc(page) flags:
+ *	PG_private: identifies the first component zpdesc
+ *	PG_lock: lock all component zpdescs for a zspage free, serialize with
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -194,7 +193,10 @@  struct size_class {
 	 */
 	int size;
 	int objs_per_zspage;
-	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+	/*
+	 * Number of PAGE_SIZE sized zpdescs/pages to combine to
+	 * form a 'zspage'
+	 */
 	int pages_per_zspage;
 
 	unsigned int index;
@@ -908,7 +910,7 @@  static void free_zspage(struct zs_pool *pool, struct size_class *class,
 
 	/*
 	 * Since zs_free couldn't be sleepable, this function cannot call
-	 * lock_page. The page locks trylock_zspage got will be released
+	 * lock_page. The zpdesc locks trylock_zspage got will be released
 	 * by __free_zspage.
 	 */
 	if (!trylock_zspage(zspage)) {
@@ -965,7 +967,7 @@  static void init_zspage(struct size_class *class, struct zspage *zspage)
 	set_freeobj(zspage, 0);
 }
 
-static void create_page_chain(struct size_class *class, struct zspage *zspage,
+static void create_zpdesc_chain(struct size_class *class, struct zspage *zspage,
 				struct zpdesc *zpdescs[])
 {
 	int i;
@@ -974,9 +976,9 @@  static void create_page_chain(struct size_class *class, struct zspage *zspage,
 	int nr_zpdescs = class->pages_per_zspage;
 
 	/*
-	 * Allocate individual pages and link them together as:
-	 * 1. all pages are linked together using zpdesc->next
-	 * 2. each sub-page point to zspage using zpdesc->zspage
+	 * Allocate individual zpdescs and link them together as:
+	 * 1. all zpdescs are linked together using zpdesc->next
+	 * 2. each sub-zpdesc point to zspage using zpdesc->zspage
 	 *
 	 * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
 	 * has this flag set).
@@ -1034,7 +1036,7 @@  static struct zspage *alloc_zspage(struct zs_pool *pool,
 		zpdescs[i] = zpdesc;
 	}
 
-	create_page_chain(class, zspage, zpdescs);
+	create_zpdesc_chain(class, zspage, zpdescs);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
 	zspage->class = class->index;
@@ -1351,7 +1353,7 @@  static unsigned long obj_malloc(struct zs_pool *pool,
 		/* record handle in the header of allocated chunk */
 		link->handle = handle | OBJ_ALLOCATED_TAG;
 	else
-		/* record handle to page->index */
+		/* record handle to zpdesc->handle */
 		zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
 
 	kunmap_local(vaddr);
@@ -1441,7 +1443,6 @@  static void obj_free(int class_size, unsigned long obj)
 	unsigned int f_objidx;
 	void *vaddr;
 
-
 	obj_to_location(obj, &f_zpdesc, &f_objidx);
 	f_offset = offset_in_page(class_size * f_objidx);
 	zspage = get_zspage(f_zpdesc);
@@ -1684,19 +1685,19 @@  static int putback_zspage(struct size_class *class, struct zspage *zspage)
 #ifdef CONFIG_COMPACTION
 /*
  * To prevent zspage destroy during migration, zspage freeing should
- * hold locks of all pages in the zspage.
+ * hold locks of all component zpdesc in the zspage.
  */
 static void lock_zspage(struct zspage *zspage)
 {
 	struct zpdesc *curr_zpdesc, *zpdesc;
 
 	/*
-	 * Pages we haven't locked yet can be migrated off the list while we're
+	 * Zpdesc we haven't locked yet can be migrated off the list while we're
 	 * trying to lock them, so we need to be careful and only attempt to
-	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
-	 * may no longer belong to the zspage. This means that we may wait for
-	 * the wrong page to unlock, so we must take a reference to the page
-	 * prior to waiting for it to unlock outside migrate_read_lock().
+	 * lock each zpdesc under migrate_read_lock(). Otherwise, the zpdesc we
+	 * lock may no longer belong to the zspage. This means that we may wait
+	 * for the wrong zpdesc to unlock, so we must take a reference to the
+	 * zpdesc prior to waiting for it to unlock outside migrate_read_lock().
 	 */
 	while (1) {
 		migrate_read_lock(zspage);
@@ -1771,7 +1772,7 @@  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 		idx++;
 	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
 
-	create_page_chain(class, zspage, zpdescs);
+	create_zpdesc_chain(class, zspage, zpdescs);
 	first_obj_offset = get_first_obj_offset(oldzpdesc);
 	set_first_obj_offset(newzpdesc, first_obj_offset);
 	if (unlikely(ZsHugePage(zspage)))
@@ -1782,8 +1783,8 @@  static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
 	/*
-	 * Page is locked so zspage couldn't be destroyed. For detail, look at
-	 * lock_zspage in free_zspage.
+	 * Page/zpdesc is locked so zspage couldn't be destroyed. For detail,
+	 * look at lock_zspage in free_zspage.
 	 */
 	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
@@ -1810,7 +1811,7 @@  static int zs_page_migrate(struct page *newpage, struct page *page,
 	/* We're committed, tell the world that this is a Zsmalloc page. */
 	__zpdesc_set_zsmalloc(newzpdesc);
 
-	/* The page is locked, so this pointer must remain valid */
+	/* The zpdesc/page is locked, so this pointer must remain valid */
 	zspage = get_zspage(zpdesc);
 	pool = zspage->pool;
 
@@ -1883,7 +1884,7 @@  static const struct movable_operations zsmalloc_mops = {
 };
 
 /*
- * Caller should hold page_lock of all pages in the zspage
+ * Caller should hold zpdesc locks of all in the zspage
  * In here, we cannot use zspage meta data.
  */
 static void async_free_zspage(struct work_struct *work)