diff mbox series

[RFC,19/25] mm/zsmalloc: convert zs_compact_control and its users to use zsdesc

Message ID 20230220132218.546369-20-42.hyeyoo@gmail.com (mailing list archive)
State New
Headers show
Series mm/zsmalloc: Split zsdesc from struct page | expand

Commit Message

Hyeonggon Yoo Feb. 20, 2023, 1:22 p.m. UTC
Convert struct zs_compact_control to use zsdesc, update comments
accordingly, and also convert its users.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff mbox series

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b58821b3494b..488dc570d660 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1956,12 +1956,12 @@  static unsigned long find_deferred_handle_obj(struct size_class *class,
 #endif
 
 struct zs_compact_control {
-	/* Source spage for migration which could be a subpage of zspage */
-	struct page *s_page;
-	/* Destination page for migration which should be a first page
+	/* Source zsdesc for migration, which may be the zsdesc of any subpage of the zspage */
+	struct zsdesc *s_zsdesc;
+	/* Destination zsdesc for migration which should be the first zsdesc
 	 * of zspage. */
-	struct page *d_page;
-	 /* Starting object index within @s_page which used for live object
+	struct zsdesc *d_zsdesc;
+	 /* Starting object index within @s_zsdesc which is used for live objects
 	  * in the subpage. */
 	int obj_idx;
 };
@@ -1971,29 +1971,29 @@  static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 {
 	unsigned long used_obj, free_obj;
 	unsigned long handle;
-	struct page *s_page = cc->s_page;
-	struct page *d_page = cc->d_page;
+	struct zsdesc *s_zsdesc = cc->s_zsdesc;
+	struct zsdesc *d_zsdesc = cc->d_zsdesc;
 	int obj_idx = cc->obj_idx;
 	int ret = 0;
 
 	while (1) {
-		handle = find_alloced_obj(class, page_zsdesc(s_page), &obj_idx);
+		handle = find_alloced_obj(class, s_zsdesc, &obj_idx);
 		if (!handle) {
-			s_page = get_next_page(s_page);
-			if (!s_page)
+			s_zsdesc = get_next_zsdesc(s_zsdesc);
+			if (!s_zsdesc)
 				break;
 			obj_idx = 0;
 			continue;
 		}
 
 		/* Stop if there is no more space */
-		if (zspage_full(class, get_zspage(d_page))) {
+		if (zspage_full(class, get_zspage(zsdesc_page(d_zsdesc)))) {
 			ret = -ENOMEM;
 			break;
 		}
 
 		used_obj = handle_to_obj(handle);
-		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
+		free_obj = obj_malloc(pool, get_zspage(zsdesc_page(d_zsdesc)), handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
 		record_obj(handle, free_obj);
@@ -2001,7 +2001,7 @@  static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	}
 
 	/* Remember last position in this iteration */
-	cc->s_page = s_page;
+	cc->s_zsdesc = s_zsdesc;
 	cc->obj_idx = obj_idx;
 
 	return ret;
@@ -2410,12 +2410,12 @@  static unsigned long __zs_compact(struct zs_pool *pool,
 			break;
 
 		cc.obj_idx = 0;
-		cc.s_page = get_first_page(src_zspage);
+		cc.s_zsdesc = get_first_zsdesc(src_zspage);
 
 		while ((dst_zspage = isolate_zspage(class, false))) {
 			migrate_write_lock_nested(dst_zspage);
 
-			cc.d_page = get_first_page(dst_zspage);
+			cc.d_zsdesc = get_first_zsdesc(dst_zspage);
 			/*
 			 * If there is no more space in dst_page, resched
 			 * and see if anyone had allocated another zspage.