
[2/2] mm/zsmalloc: move record_obj() into obj_malloc()

Message ID 20240627075959.611783-2-chengming.zhou@linux.dev (mailing list archive)
State New
Series [1/2] mm/zsmalloc: fix class per-fullness zspage counts

Commit Message

Chengming Zhou June 27, 2024, 7:59 a.m. UTC
We always call record_obj() to make the handle point to the object after
obj_malloc(), so simplify the code by moving record_obj() into obj_malloc().
There should be no functional change.

Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
---
 mm/zsmalloc.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
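
For context, record_obj() just stores the encoded object location into the
slot that the handle refers to; roughly the following (a simplified sketch,
not the verbatim upstream helper):

	static void record_obj(unsigned long handle, unsigned long obj)
	{
		/*
		 * The handle is itself a small allocated slot; its content
		 * is the encoded <page, obj_idx> location of the object.
		 */
		*(unsigned long *)handle = obj;
	}

With this change, obj_malloc() performs that store itself right after
computing the object location, so callers no longer need a separate
record_obj() call.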

Comments

Sergey Senozhatsky June 28, 2024, 2:25 a.m. UTC | #1
On (24/06/27 15:59), Chengming Zhou wrote:
> We always call record_obj() to make the handle point to the object after
> obj_malloc(), so simplify the code by moving record_obj() into obj_malloc().
> There should be no functional change.
> 
> Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>

I guess I don't have a strong opinion on that.

FWIW
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>

[..]
> @@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
>  		free_obj = obj_malloc(pool, dst_zspage, handle);
>  		zs_object_copy(class, free_obj, used_obj);
>  		obj_idx++;
> -		record_obj(handle, free_obj);
>  		obj_free(class->size, used_obj);

I sort of like how the old code copies the object first and then records it
here, as a sort of "commit" stage.  But I don't see any issues with the new code.
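
To make the ordering change concrete, the loop body quoted above goes roughly
from the first form to the second (abbreviated sketch, obj_idx bookkeeping
omitted):

	/* before: point the handle at the new location only after the copy */
	free_obj = obj_malloc(pool, dst_zspage, handle);
	zs_object_copy(class, free_obj, used_obj);
	record_obj(handle, free_obj);
	obj_free(class->size, used_obj);

	/* after: obj_malloc() records the new location itself, before the copy */
	free_obj = obj_malloc(pool, dst_zspage, handle);
	zs_object_copy(class, free_obj, used_obj);
	obj_free(class->size, used_obj);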

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7fc25fa4e6b3..c2f4e62ffb46 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1306,7 +1306,6 @@  static unsigned long obj_malloc(struct zs_pool *pool,
 	void *vaddr;
 
 	class = pool->size_class[zspage->class];
-	handle |= OBJ_ALLOCATED_TAG;
 	obj = get_freeobj(zspage);
 
 	offset = obj * class->size;
@@ -1322,15 +1321,16 @@  static unsigned long obj_malloc(struct zs_pool *pool,
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!ZsHugePage(zspage)))
 		/* record handle in the header of allocated chunk */
-		link->handle = handle;
+		link->handle = handle | OBJ_ALLOCATED_TAG;
 	else
 		/* record handle to page->index */
-		zspage->first_page->index = handle;
+		zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
 
 	obj = location_to_obj(m_page, obj);
+	record_obj(handle, obj);
 
 	return obj;
 }
@@ -1348,7 +1348,7 @@  static unsigned long obj_malloc(struct zs_pool *pool,
  */
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 {
-	unsigned long handle, obj;
+	unsigned long handle;
 	struct size_class *class;
 	int newfg;
 	struct zspage *zspage;
@@ -1371,10 +1371,9 @@  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	spin_lock(&class->lock);
 	zspage = find_get_zspage(class);
 	if (likely(zspage)) {
-		obj = obj_malloc(pool, zspage, handle);
+		obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
-		record_obj(handle, obj);
 		class_stat_inc(class, ZS_OBJS_INUSE, 1);
 
 		goto out;
@@ -1389,10 +1388,9 @@  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	}
 
 	spin_lock(&class->lock);
-	obj = obj_malloc(pool, zspage, handle);
+	obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	class_stat_inc(class, ZS_OBJS_INUSE, 1);
@@ -1591,7 +1589,6 @@  static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
 		free_obj = obj_malloc(pool, dst_zspage, handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
-		record_obj(handle, free_obj);
 		obj_free(class->size, used_obj);
 
 		/* Stop if there is no more space */