mm/zsmalloc: Use memcpy_from/to_page wherever possible

Message ID 20241010175143.27262-1-quic_pintu@quicinc.com (mailing list archive)

Commit Message

Pintu Kumar Oct. 10, 2024, 5:51 p.m. UTC
As part of [1], kmap/kunmap_atomic() were replaced with
kmap_local_page()/kunmap_local().

It was later found that some of this code can be replaced with
APIs already available in highmem.h, namely
memcpy_from_page()/memcpy_to_page().

Also update the comments to use the correct API names.

[1] https://lkml.kernel.org/r/20241001175358.12970-1-quic_pintu@quicinc.com

Signed-off-by: Pintu Kumar <quic_pintu@quicinc.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 mm/zsmalloc.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)
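
For reference, memcpy_from_page() and memcpy_to_page() wrap exactly the
kmap_local_page()/memcpy()/kunmap_local() sequence that this patch
removes from the open-coded call sites. A simplified sketch of the two
helpers, close to their include/linux/highmem.h definitions (exact
details may vary by kernel version):

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	/* the copy must not cross the page boundary */
	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	kunmap_local(to);
	/* writes through a kernel mapping may need a cache flush */
	flush_dcache_page(page);
}

Both helpers take size_t offsets and lengths, which is also why the
patch changes the sizes[] arrays in __zs_map_object() and
__zs_unmap_object() from int to size_t.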

Comments

Sergey Senozhatsky Oct. 11, 2024, 2 a.m. UTC | #1
On (24/10/10 23:21), Pintu Kumar wrote:
[..]
> @@ -1511,10 +1501,10 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
>  		d_size -= size;
>  
>  		/*
> -		 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
> -		 * calls must occurs in reverse order of calls to kmap_atomic().
> -		 * So, to call kunmap_atomic(s_addr) we should first call
> -		 * kunmap_atomic(d_addr). For more details see
> +		 * Calling kunmap_local(d_addr) is necessary. kunmap_local()
> +		 * calls must occur in reverse order of calls to kmap_local_page().
> +		 * So, to call kunmap_local(s_addr) we should first call
> +		 * kunmap_local(d_addr). For more details see
>  		 * Documentation/mm/highmem.rst.
>  		 */

I'd prefer this entire comment to be dropped.
Pintu Agarwal Oct. 11, 2024, 2:41 p.m. UTC | #2
On Fri, 11 Oct 2024 at 07:30, Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> On (24/10/10 23:21), Pintu Kumar wrote:
> [..]
> > @@ -1511,10 +1501,10 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
> >               d_size -= size;
> >
> >               /*
> > -              * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
> > -              * calls must occurs in reverse order of calls to kmap_atomic().
> > -              * So, to call kunmap_atomic(s_addr) we should first call
> > -              * kunmap_atomic(d_addr). For more details see
> > +              * Calling kunmap_local(d_addr) is necessary. kunmap_local()
> > +              * calls must occur in reverse order of calls to kmap_local_page().
> > +              * So, to call kunmap_local(s_addr) we should first call
> > +              * kunmap_local(d_addr). For more details see
> >                * Documentation/mm/highmem.rst.
> >                */
>
> I'd prefer this entire comment to be dropped.
Oh, I thought the below code for k[un]map_local[_page] still exists,
so the comments are still valid.
OK, I will remove it in the next patchset.
Looks like there are a few more code improvements possible.

Thank you,
Pintu
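
For context on the ordering discussed above: kmap_local_page() mappings
are stack-based, so nested mappings have to be undone in reverse order
(see Documentation/mm/highmem.rst). A minimal sketch of the pattern that
zs_object_copy() relies on, with s_page/d_page standing in for the
source and destination pages:

	void *s_addr = kmap_local_page(s_page);
	void *d_addr = kmap_local_page(d_page);

	/* ... copy object data between s_addr and d_addr ... */

	kunmap_local(d_addr);	/* most recently mapped page first */
	kunmap_local(s_addr);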

Patch

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d3ff10160a5f..64b66a4d3e6e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -263,7 +263,7 @@  struct zspage {
 struct mapping_area {
 	local_lock_t lock;
 	char *vm_buf; /* copy buffer for objects that span pages */
-	char *vm_addr; /* address of kmap_atomic()'ed pages */
+	char *vm_addr; /* address of kmap_local_page()'ed pages */
 	enum zs_mapmode vm_mm; /* mapping mode */
 };
 
@@ -1046,11 +1046,10 @@  static inline void __zs_cpu_down(struct mapping_area *area)
 static void *__zs_map_object(struct mapping_area *area,
 			struct page *pages[2], int off, int size)
 {
-	int sizes[2];
-	void *addr;
+	size_t sizes[2];
 	char *buf = area->vm_buf;
 
-	/* disable page faults to match kmap_atomic() return conditions */
+	/* disable page faults to match kmap_local_page() return conditions */
 	pagefault_disable();
 
 	/* no read fastpath */
@@ -1061,12 +1060,8 @@  static void *__zs_map_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy object to per-cpu buffer */
-	addr = kmap_local_page(pages[0]);
-	memcpy(buf, addr + off, sizes[0]);
-	kunmap_local(addr);
-	addr = kmap_local_page(pages[1]);
-	memcpy(buf + sizes[0], addr, sizes[1]);
-	kunmap_local(addr);
+	memcpy_from_page(buf, pages[0], off, sizes[0]);
+	memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
 out:
 	return area->vm_buf;
 }
@@ -1074,8 +1069,7 @@  static void *__zs_map_object(struct mapping_area *area,
 static void __zs_unmap_object(struct mapping_area *area,
 			struct page *pages[2], int off, int size)
 {
-	int sizes[2];
-	void *addr;
+	size_t sizes[2];
 	char *buf;
 
 	/* no write fastpath */
@@ -1091,15 +1085,11 @@  static void __zs_unmap_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy per-cpu buffer to object */
-	addr = kmap_local_page(pages[0]);
-	memcpy(addr + off, buf, sizes[0]);
-	kunmap_local(addr);
-	addr = kmap_local_page(pages[1]);
-	memcpy(addr, buf + sizes[0], sizes[1]);
-	kunmap_local(addr);
+	memcpy_to_page(pages[0], off, buf, sizes[0]);
+	memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);
 
 out:
-	/* enable page faults to match kunmap_atomic() return conditions */
+	/* enable page faults to match kunmap_local() return conditions */
 	pagefault_enable();
 }
 
@@ -1511,10 +1501,10 @@  static void zs_object_copy(struct size_class *class, unsigned long dst,
 		d_size -= size;
 
 		/*
-		 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
-		 * calls must occurs in reverse order of calls to kmap_atomic().
-		 * So, to call kunmap_atomic(s_addr) we should first call
-		 * kunmap_atomic(d_addr). For more details see
+		 * Calling kunmap_local(d_addr) is necessary. kunmap_local()
+		 * calls must occur in reverse order of calls to kmap_local_page().
+		 * So, to call kunmap_local(s_addr) we should first call
+		 * kunmap_local(d_addr). For more details see
 		 * Documentation/mm/highmem.rst.
 		 */
 		if (s_off >= PAGE_SIZE) {