
[RFC,07/25] mm/zsmalloc: convert obj_to_location() and its users to use zsdesc

Message ID 20230220132218.546369-8-42.hyeyoo@gmail.com (mailing list archive)
State New
Series mm/zsmalloc: Split zsdesc from struct page

Commit Message

Hyeonggon Yoo Feb. 20, 2023, 1:22 p.m. UTC
Convert obj_to_location() to take a zsdesc instead of a page, and
convert its callers to use zsdesc accordingly.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/zsmalloc.c | 80 +++++++++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 40 deletions(-)
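
For context: this patch depends on zsdesc helpers introduced earlier in
the series, namely pfn_zsdesc(), zsdesc_page(), zsdesc_kmap_atomic() and
get_next_zsdesc(). A minimal sketch of what the first three might look
like, assuming struct zsdesc is a typed overlay of struct page (the
authoritative definitions live in the earlier patches of this series):

static inline struct zsdesc *page_zsdesc(struct page *page)
{
	/* zsdesc is assumed to be layout-compatible with struct page */
	return (struct zsdesc *)page;
}

static inline struct page *zsdesc_page(struct zsdesc *zsdesc)
{
	return (struct page *)zsdesc;
}

static inline struct zsdesc *pfn_zsdesc(unsigned long pfn)
{
	return page_zsdesc(pfn_to_page(pfn));
}

static inline void *zsdesc_kmap_atomic(struct zsdesc *zsdesc)
{
	return kmap_atomic(zsdesc_page(zsdesc));
}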

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 821d72ab888c..56cb93629c7f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1007,16 +1007,16 @@  static __maybe_unused struct zsdesc *get_next_zsdesc(struct zsdesc *zsdesc)
 }
 
 /**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zsdesc>, <obj_idx>) from encoded object value
  * @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zsdesc: zsdesc in which the object resides
  * @obj_idx: object index
  */
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zsdesc **zsdesc,
 				unsigned int *obj_idx)
 {
 	obj >>= OBJ_TAG_BITS;
-	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+	*zsdesc = pfn_zsdesc(obj >> OBJ_INDEX_BITS);
 	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
@@ -1498,13 +1498,13 @@  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zsdesc *zsdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
 	struct size_class *class;
 	struct mapping_area *area;
-	struct page *pages[2];
+	struct zsdesc *zsdescs[2];
 	void *ret;
 
 	/*
@@ -1517,8 +1517,8 @@  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	/* It guarantees it can get zspage from handle safely */
 	spin_lock(&pool->lock);
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zsdesc, &obj_idx);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 
 #ifdef CONFIG_ZPOOL
 	/*
@@ -1561,18 +1561,18 @@  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	area = this_cpu_ptr(&zs_map_area);
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
-		/* this object is contained entirely within a page */
-		area->vm_addr = kmap_atomic(page);
+		/* this object is contained entirely within a zsdesc */
+		area->vm_addr = zsdesc_kmap_atomic(zsdesc);
 		ret = area->vm_addr + off;
 		goto out;
 	}
 
-	/* this object spans two pages */
-	pages[0] = page;
-	pages[1] = get_next_page(page);
-	BUG_ON(!pages[1]);
+	/* this object spans two zsdescs */
+	zsdescs[0] = zsdesc;
+	zsdescs[1] = get_next_zsdesc(zsdesc);
+	BUG_ON(!zsdescs[1]);
 
-	ret = __zs_map_object(area, (struct zsdesc **)pages, off, class->size);
+	ret = __zs_map_object(area, zsdescs, off, class->size);
 out:
 	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
@@ -1584,7 +1584,7 @@  EXPORT_SYMBOL_GPL(zs_map_object);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
 	struct zspage *zspage;
-	struct page *page;
+	struct zsdesc *zsdesc;
 	unsigned long obj, off;
 	unsigned int obj_idx;
 
@@ -1592,8 +1592,8 @@  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	struct mapping_area *area;
 
 	obj = handle_to_obj(handle);
-	obj_to_location(obj, &page, &obj_idx);
-	zspage = get_zspage(page);
+	obj_to_location(obj, &zsdesc, &obj_idx);
+	zspage = get_zspage(zsdesc_page(zsdesc));
 	class = zspage_class(pool, zspage);
 	off = (class->size * obj_idx) & ~PAGE_MASK;
 
@@ -1601,13 +1601,13 @@  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	if (off + class->size <= PAGE_SIZE)
 		kunmap_atomic(area->vm_addr);
 	else {
-		struct page *pages[2];
+		struct zsdesc *zsdescs[2];
 
-		pages[0] = page;
-		pages[1] = get_next_page(page);
-		BUG_ON(!pages[1]);
+		zsdescs[0] = zsdesc;
+		zsdescs[1] = get_next_zsdesc(zsdesc);
+		BUG_ON(!zsdescs[1]);
 
-		__zs_unmap_object(area, (struct zsdesc **)pages, off, class->size);
+		__zs_unmap_object(area, zsdescs, off, class->size);
 	}
 	local_unlock(&zs_map_area.lock);
 
@@ -1750,16 +1750,16 @@  static void obj_free(int class_size, unsigned long obj, unsigned long *handle)
 {
 	struct link_free *link;
 	struct zspage *zspage;
-	struct page *f_page;
+	struct zsdesc *f_zsdesc;
 	unsigned long f_offset;
 	unsigned int f_objidx;
 	void *vaddr;
 
-	obj_to_location(obj, &f_page, &f_objidx);
+	obj_to_location(obj, &f_zsdesc, &f_objidx);
 	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
-	zspage = get_zspage(f_page);
+	zspage = get_zspage(zsdesc_page(f_zsdesc));
 
-	vaddr = kmap_atomic(f_page);
+	vaddr = zsdesc_kmap_atomic(f_zsdesc);
 	link = (struct link_free *)(vaddr + f_offset);
 
 	if (handle) {
@@ -1771,14 +1771,14 @@  static void obj_free(int class_size, unsigned long obj, unsigned long *handle)
 		if (likely(!ZsHugePage(zspage)))
 			link->deferred_handle = *handle;
 		else
-			f_page->index = *handle;
+			f_zsdesc->handle = *handle;
 #endif
 	} else {
 		/* Insert this object in containing zspage's freelist */
 		if (likely(!ZsHugePage(zspage)))
 			link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 		else
-			f_page->index = 0;
+			f_zsdesc->next = NULL;
 		set_freeobj(zspage, f_objidx);
 	}
 
@@ -1836,7 +1836,7 @@  EXPORT_SYMBOL_GPL(zs_free);
 static void zs_object_copy(struct size_class *class, unsigned long dst,
 				unsigned long src)
 {
-	struct page *s_page, *d_page;
+	struct zsdesc *s_zsdesc, *d_zsdesc;
 	unsigned int s_objidx, d_objidx;
 	unsigned long s_off, d_off;
 	void *s_addr, *d_addr;
@@ -1845,8 +1845,8 @@  static void zs_object_copy(struct size_class *class, unsigned long dst,
 
 	s_size = d_size = class->size;
 
-	obj_to_location(src, &s_page, &s_objidx);
-	obj_to_location(dst, &d_page, &d_objidx);
+	obj_to_location(src, &s_zsdesc, &s_objidx);
+	obj_to_location(dst, &d_zsdesc, &d_objidx);
 
 	s_off = (class->size * s_objidx) & ~PAGE_MASK;
 	d_off = (class->size * d_objidx) & ~PAGE_MASK;
@@ -1857,8 +1857,8 @@  static void zs_object_copy(struct size_class *class, unsigned long dst,
 	if (d_off + class->size > PAGE_SIZE)
 		d_size = PAGE_SIZE - d_off;
 
-	s_addr = kmap_atomic(s_page);
-	d_addr = kmap_atomic(d_page);
+	s_addr = zsdesc_kmap_atomic(s_zsdesc);
+	d_addr = zsdesc_kmap_atomic(d_zsdesc);
 
 	while (1) {
 		size = min(s_size, d_size);
@@ -1883,17 +1883,17 @@  static void zs_object_copy(struct size_class *class, unsigned long dst,
 		if (s_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
 			kunmap_atomic(s_addr);
-			s_page = get_next_page(s_page);
-			s_addr = kmap_atomic(s_page);
-			d_addr = kmap_atomic(d_page);
+			s_zsdesc = get_next_zsdesc(s_zsdesc);
+			s_addr = zsdesc_kmap_atomic(s_zsdesc);
+			d_addr = zsdesc_kmap_atomic(d_zsdesc);
 			s_size = class->size - written;
 			s_off = 0;
 		}
 
 		if (d_off >= PAGE_SIZE) {
 			kunmap_atomic(d_addr);
-			d_page = get_next_page(d_page);
-			d_addr = kmap_atomic(d_page);
+			d_zsdesc = get_next_zsdesc(d_zsdesc);
+			d_addr = zsdesc_kmap_atomic(d_zsdesc);
 			d_size = class->size - written;
 			d_off = 0;
 		}
@@ -2200,7 +2200,7 @@  static int zs_page_migrate(struct page *newpage, struct page *page,
 	struct zs_pool *pool;
 	struct size_class *class;
 	struct zspage *zspage;
-	struct page *dummy;
+	struct zsdesc *dummy;
 	void *s_addr, *d_addr, *addr;
 	unsigned int offset;
 	unsigned long handle;
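
For reference, the encode side is the mirror image of the converted
obj_to_location() above: pfn in the high bits, object index below it,
tag bits at the bottom. A sketch of a zsdesc-based counterpart,
assuming mainline's location_to_obj() is converted the same way
elsewhere in the series (the zsdesc variant is not part of this patch):

/*
 * Sketch only: mirrors mainline location_to_obj(); decoding it with
 * the converted obj_to_location() yields the same (zsdesc, obj_idx).
 */
static unsigned long location_to_obj(struct zsdesc *zsdesc,
					unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(zsdesc_page(zsdesc)) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}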