[RFC,22/25] mm/zsmalloc: convert restore_freelist() to use zsdesc

Message ID: 20230220132218.546369-23-42.hyeyoo@gmail.com
State: New
Series: mm/zsmalloc: Split zsdesc from struct page

Commit Message

Hyeonggon Yoo, Feb. 20, 2023, 1:22 p.m. UTC
Convert restore_freelist() to use zsdesc.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
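
Note for reviewers: this patch relies on the zsdesc infrastructure
introduced earlier in the series. As a reading aid, the wrappers used
below are assumed to be thin casts over struct page, roughly like the
following sketch (the exact definitions live in the earlier patches,
not here):

	/* zsdesc overlays struct page until it becomes its own descriptor */
	static inline struct page *zsdesc_page(const struct zsdesc *zsdesc)
	{
		return (struct page *)zsdesc;
	}

	static inline struct zsdesc *page_zsdesc(struct page *page)
	{
		return (struct zsdesc *)page;
	}

	/* kmap_atomic() wrapper so zsmalloc code never touches struct page */
	static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
	{
		return kmap_atomic(zsdesc_page(zsdesc));
	}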

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e9202bb14704..b6ca93012c9a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2718,29 +2718,29 @@  static void restore_freelist(struct zs_pool *pool, struct size_class *class,
 {
 	unsigned int obj_idx = 0;
 	unsigned long handle, off = 0; /* off is within-page offset */
-	struct page *page = get_first_page(zspage);
+	struct zsdesc *zsdesc = get_first_zsdesc(zspage);
 	struct link_free *prev_free = NULL;
-	void *prev_page_vaddr = NULL;
+	void *prev_zsdesc_vaddr = NULL;
 
 	/* in case no free object found */
 	set_freeobj(zspage, (unsigned int)(-1UL));
 
-	while (page) {
-		void *vaddr = kmap_atomic(page);
-		struct page *next_page;
+	while (zsdesc) {
+		void *vaddr = zsdesc_kmap_atomic(zsdesc);
+		struct zsdesc *next_zsdesc;
 
 		while (off < PAGE_SIZE) {
 			void *obj_addr = vaddr + off;
 
 			/* skip allocated object */
-			if (obj_allocated(page_zsdesc(page), obj_addr, &handle)) {
+			if (obj_allocated(zsdesc, obj_addr, &handle)) {
 				obj_idx++;
 				off += class->size;
 				continue;
 			}
 
 			/* free deferred handle from reclaim attempt */
-			if (obj_stores_deferred_handle(page_zsdesc(page), obj_addr, &handle))
+			if (obj_stores_deferred_handle(zsdesc, obj_addr, &handle))
 				cache_free_handle(pool, handle);
 
 			if (prev_free)
@@ -2749,10 +2749,10 @@  static void restore_freelist(struct zs_pool *pool, struct size_class *class,
 				set_freeobj(zspage, obj_idx);
 
 			prev_free = (struct link_free *)vaddr + off / sizeof(*prev_free);
-			/* if last free object in a previous page, need to unmap */
-			if (prev_page_vaddr) {
-				kunmap_atomic(prev_page_vaddr);
-				prev_page_vaddr = NULL;
+			/* if last free object in a previous zsdesc, need to unmap */
+			if (prev_zsdesc_vaddr) {
+				kunmap_atomic(prev_zsdesc_vaddr);
+				prev_zsdesc_vaddr = NULL;
 			}
 
 			obj_idx++;
@@ -2760,19 +2760,19 @@  static void restore_freelist(struct zs_pool *pool, struct size_class *class,
 		}
 
 		/*
-		 * Handle the last (full or partial) object on this page.
+		 * Handle the last (full or partial) object on this zsdesc.
 		 */
-		next_page = get_next_page(page);
-		if (next_page) {
-			if (!prev_free || prev_page_vaddr) {
+		next_zsdesc = get_next_zsdesc(zsdesc);
+		if (next_zsdesc) {
+			if (!prev_free || prev_zsdesc_vaddr) {
 				/*
 				 * There is no free object in this page, so we can safely
 				 * unmap it.
 				 */
 				kunmap_atomic(vaddr);
 			} else {
-				/* update prev_page_vaddr since prev_free is on this page */
-				prev_page_vaddr = vaddr;
+				/* update prev_zsdesc_vaddr since prev_free is on this zsdesc */
+				prev_zsdesc_vaddr = vaddr;
 			}
 		} else { /* this is the last page */
 			if (prev_free) {
@@ -2783,16 +2783,16 @@  static void restore_freelist(struct zs_pool *pool, struct size_class *class,
 				prev_free->next = -1UL << OBJ_TAG_BITS;
 			}
 
-			/* unmap previous page (if not done yet) */
-			if (prev_page_vaddr) {
-				kunmap_atomic(prev_page_vaddr);
-				prev_page_vaddr = NULL;
+			/* unmap previous zsdesc (if not done yet) */
+			if (prev_zsdesc_vaddr) {
+				kunmap_atomic(prev_zsdesc_vaddr);
+				prev_zsdesc_vaddr = NULL;
 			}
 
 			kunmap_atomic(vaddr);
 		}
 
-		page = next_page;
+		zsdesc = next_zsdesc;
 		off %= PAGE_SIZE;
 	}
 }
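
A note on the mapping dance above, since it is easy to misread:
prev_free can live on an earlier zsdesc than the object that
terminates it, so the earlier mapping (prev_zsdesc_vaddr) must stay
alive until prev_free->next is written. The value being rebuilt is the
usual zsmalloc freelist link; spelled out as hypothetical helpers
(illustrative only, not part of this patch):

	static inline void link_set_next(struct link_free *link,
					 unsigned int next_obj_idx)
	{
		/* index of the next free slot, shifted past the tag bits */
		link->next = (unsigned long)next_obj_idx << OBJ_TAG_BITS;
	}

	static inline void link_set_end(struct link_free *link)
	{
		/* all-ones above the tag bits terminates the freelist,
		 * matching the -1UL << OBJ_TAG_BITS store above */
		link->next = -1UL << OBJ_TAG_BITS;
	}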