
[v6] crypto: scompress: remove memcpy if sg_nents is 1 and pages are lowmem

Message ID 20240222020617.31041-1-21cnbao@gmail.com (mailing list archive)
State Changes Requested
Delegated to: Herbert Xu
Series [v6] crypto: scompress: remove memcpy if sg_nents is 1 and pages are lowmem

Commit Message

Barry Song Feb. 22, 2024, 2:06 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

When sg_nents is 1 - which is always true for the only current user,
zswap - we have the chance to remove the memcpy and thus improve
performance.
Even when sg_nents is 1, the buffer might cross two pages. If those
pages are highmem, we have no cheap way to map them to a contiguous
virtual address, because kmap doesn't support more than one page
(kmap of even a single highmem page can still be expensive due to TLB
overhead) and vmap is expensive.
So we also test that the page is not highmem in order to safely use
page_to_virt() before skipping the memcpy. The good news is that in
the vast majority of cases the buffer is in lowmem, and it is always
lowmem on modern, popular hardware.
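
In simplified form the fast path amounts to the following (an
illustrative sketch only - scomp_map_src() is a made-up helper name
here, the real change is in the diff below):

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/* Use the buffer in place when it is a single sg entry backed by
 * lowmem; otherwise fall back to copying through the scratch buffer.
 */
static void *scomp_map_src(struct scatterlist *sg, void *scratch,
			   unsigned int len)
{
	if (sg_nents(sg) == 1 && !PageHighMem(sg_page(sg)))
		return page_to_virt(sg_page(sg)) + sg->offset;

	scatterwalk_map_and_copy(scratch, sg, 0, len, 0);
	return scratch;
}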

Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Tested-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 -v6:
  * separate it from the zswap & crypto series[1] as suggested by Herbert;
  * make sure the code is still safe when a buffer crosses two pages,
    addressing Herbert's comment
[1] https://lore.kernel.org/linux-mm/20240220064414.262582-1-21cnbao@gmail.com/

 crypto/scompress.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

Comments

Herbert Xu March 1, 2024, 10:27 a.m. UTC | #1
On Thu, Feb 22, 2024 at 03:06:17PM +1300, Barry Song wrote:
>
> -	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
> +	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
> +		src = page_to_virt(sg_page(req->src)) + req->src->offset;

Incidentally this made me look at other uses of PageHighMem in
the kernel.

The one in copy_page_from_iter_atomic looks buggy because it assumes
that the kmap never maps a page if PageHighMem is false, which is not
the case for CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP.
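
In general terms the broken assumption looks like this (an
illustrative sketch, not the actual iov_iter code):

#include <linux/highmem.h>
#include <linux/string.h>

static void lowmem_kmap_assumption(struct page *page, size_t len)
{
	void *p = kmap_local_page(page);

	/* Wrong if len > PAGE_SIZE: with CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
	 * this is a temporary one-page mapping even though
	 * !PageHighMem(page), so p is not page_address(page) and the next
	 * page is not mapped at p + PAGE_SIZE.
	 */
	memset(p, 0, len);

	kunmap_local(p);
}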

> @@ -152,8 +165,12 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
>  			ret = -ENOSPC;
>  			goto out;
>  		}
> -		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
> -					 1);
> +		if (dst == scratch->dst) {
> +			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
> +						 req->dlen, 1);
> +		} else {
> +			flush_dcache_page(sg_page(req->dst));

I think this is still wrong for the > PAGE_SIZE case.  The existing
code flushes each page sequentially  but the new code only flushes the
first page.

Thanks,
Barry Song March 1, 2024, 11:17 a.m. UTC | #2
On Fri, Mar 1, 2024 at 11:28 PM Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> On Thu, Feb 22, 2024 at 03:06:17PM +1300, Barry Song wrote:
> >
> > -     scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
> > +     if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
> > +             src = page_to_virt(sg_page(req->src)) + req->src->offset;
>
> Incidentally this made me look at other uses of PageHighMem in
> the kernel.
>
> The one in copy_page_from_iter_atomic looks buggy because it assumes
> that the kmap never maps a page if PageHighMem is false, which is not
> the case for CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP.

You are right. This needs to be fixed.

>
> > @@ -152,8 +165,12 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
> >                       ret = -ENOSPC;
> >                       goto out;
> >               }
> > -             scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
> > -                                      1);
> > +             if (dst == scratch->dst) {
> > +                     scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
> > +                                              req->dlen, 1);
> > +             } else {
> > +                     flush_dcache_page(sg_page(req->dst));
>
> I think this is still wrong for the > PAGE_SIZE case.  The existing
> code flushes each page sequentially  but the new code only flushes the
> first page.

right, can it be fixed like the below?

diff --git a/crypto/scompress.c b/crypto/scompress.c
index 185d2359f28b..d85f0318f273 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -169,7 +169,11 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
                        scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
                                                 req->dlen, 1);
                } else {
-                       flush_dcache_page(sg_page(req->dst));
+                       int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
+                       int i;
+
+                       for (i = 0; i < nr_pages; i++)
+                               flush_dcache_page(sg_page(req->dst) + i);
                }
        }
 out:
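
(For example, with req->dst->offset == 4000 and req->dlen == 300 on
4 KiB pages, DIV_ROUND_UP(4300, 4096) is 2, so both pages touched by
the destination buffer get flushed.)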

>
> Thanks,
> --
> Email: Herbert Xu <herbert@gondor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
>

Thanks
Barry

Patch

diff --git a/crypto/scompress.c b/crypto/scompress.c
index b108a30a7600..185d2359f28b 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	struct crypto_scomp *scomp = *tfm_ctx;
 	void **ctx = acomp_request_ctx(req);
 	struct scomp_scratch *scratch;
+	void *src, *dst;
 	unsigned int dlen;
 	int ret;
 
@@ -134,13 +135,25 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	scratch = raw_cpu_ptr(&scomp_scratch);
 	spin_lock(&scratch->lock);
 
-	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
+	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
+		src = page_to_virt(sg_page(req->src)) + req->src->offset;
+	} else {
+		scatterwalk_map_and_copy(scratch->src, req->src, 0,
+					 req->slen, 0);
+		src = scratch->src;
+	}
+
+	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
+		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
+	else
+		dst = scratch->dst;
+
 	if (dir)
-		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
-					    scratch->dst, &req->dlen, *ctx);
+		ret = crypto_scomp_compress(scomp, src, req->slen,
+					    dst, &req->dlen, *ctx);
 	else
-		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
-					      scratch->dst, &req->dlen, *ctx);
+		ret = crypto_scomp_decompress(scomp, src, req->slen,
+					      dst, &req->dlen, *ctx);
 	if (!ret) {
 		if (!req->dst) {
 			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
@@ -152,8 +165,12 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 			ret = -ENOSPC;
 			goto out;
 		}
-		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
-					 1);
+		if (dst == scratch->dst) {
+			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
+						 req->dlen, 1);
+		} else {
+			flush_dcache_page(sg_page(req->dst));
+		}
 	}
 out:
 	spin_unlock(&scratch->lock);