Message ID | e858dadf36f7fc2c12545c648dda4645f48cab22.1741688305.git.herbert@gondor.apana.org.au (mailing list archive)
---|---
State | Changes Requested |
Delegated to: | Herbert Xu |
Series | crypto: Use nth_page instead of doing it by hand
On Tue, Mar 11, 2025 at 06:20:31PM +0800, Herbert Xu wrote:
> Use nth_page instead of adding n to the page pointer.
>
> This also fixes a real bug in shash_ahash_digest where the the
> check for continguous hash data may be incorrect in the presence
> of highmem. This could result in an incorrect hash or worse.
>
> Fixes: 5f7082ed4f48 ("crypto: hash - Export shash through hash")
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
> ---
>  crypto/ahash.c | 38 +++++++++++++++++++++++++-------------
>  1 file changed, 25 insertions(+), 13 deletions(-)
>
> diff --git a/crypto/ahash.c b/crypto/ahash.c
> index 9c26175c21a8..75d642897e36 100644
> --- a/crypto/ahash.c
> +++ b/crypto/ahash.c
> @@ -16,6 +16,7 @@
>  #include <linux/cryptouser.h>
>  #include <linux/err.h>
>  #include <linux/kernel.h>
> +#include <linux/mm.h>
>  #include <linux/module.h>
>  #include <linux/sched.h>
>  #include <linux/slab.h>
> @@ -79,7 +80,7 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
>
>  	sg = walk->sg;
>  	walk->offset = sg->offset;
> -	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
> +	walk->pg = nth_page(sg_page(walk->sg), walk->offset >> PAGE_SHIFT);
>  	walk->offset = offset_in_page(walk->offset);
>  	walk->entrylen = sg->length;
>
> @@ -201,25 +202,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
>  	unsigned int nbytes = req->nbytes;
>  	struct scatterlist *sg;
>  	unsigned int offset;
> +	struct page *page;
> +	void *data;
>  	int err;
>
> -	if (ahash_request_isvirt(req))
> +	if (!nbytes || ahash_request_isvirt(req))
>  		return crypto_shash_digest(desc, req->svirt, nbytes,
>  					   req->result);
>
> -	if (nbytes &&
> -	    (sg = req->src, offset = sg->offset,
> -	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
> -		void *data;
> +	sg = req->src;
> +	if (nbytes > sg->length)
> +		return crypto_shash_init(desc) ?:
> +		       shash_ahash_finup(req, desc);
>
> -		data = kmap_local_page(sg_page(sg));
> -		err = crypto_shash_digest(desc, data + offset, nbytes,
> -					  req->result);
> -		kunmap_local(data);
> -	} else
> -		err = crypto_shash_init(desc) ?:
> -		      shash_ahash_finup(req, desc);
> +	page = sg_page(sg);
> +	offset = sg->offset;
> +	page = nth_page(page, offset >> PAGE_SHIFT);
> +	offset = offset_in_page(offset);
>
> +	if (!IS_ENABLED(CONFIG_HIGHMEM))
> +		return crypto_shash_digest(desc, page_address(page) + offset,
> +					   nbytes, req->result);
> +
> +	if (nbytes > (unsigned int)PAGE_SIZE - offset)
> +		return crypto_shash_init(desc) ?:
> +		       shash_ahash_finup(req, desc);
> +
> +	data = kmap_local_page(page);
> +	err = crypto_shash_digest(desc, data + offset, nbytes,
> +				  req->result);
> +	kunmap_local(data);
>  	return err;

I guess you think this is fixing a bug in the case where sg->offset > PAGE_SIZE?
Is that case even supported?  It is supposed to be the offset into a page.

Even if so, a simpler fix (1 line) would be to use:
'sg->length >= nbytes && sg->offset + nbytes <= PAGE_SIZE'

- Eric
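To make the suggestion concrete, here is a rough sketch of how that one-line check would slot into the shape of the pre-patch fast path. This is an illustration only, reusing the local variables of shash_ahash_digest(); it is not code posted in the thread.

```c
/*
 * Sketch of Eric's one-line alternative: take the kmap_local_page()
 * fast path only when the request provably fits inside the first page
 * of the scatterlist entry, so a single mapping covers it even with
 * CONFIG_HIGHMEM.
 */
if (nbytes && sg->length >= nbytes && sg->offset + nbytes <= PAGE_SIZE) {
	void *data = kmap_local_page(sg_page(sg));

	err = crypto_shash_digest(desc, data + sg->offset, nbytes,
				  req->result);
	kunmap_local(data);
} else {
	/* Anything that might span pages falls back to the generic walk. */
	err = crypto_shash_init(desc) ?:
	      shash_ahash_finup(req, desc);
}
```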
On Tue, Mar 11, 2025 at 10:44:31AM -0700, Eric Biggers wrote:
> I guess you think this is fixing a bug in the case where sg->offset > PAGE_SIZE?
> Is that case even supported?  It is supposed to be the offset into a page.
>
> Even if so, a simpler fix (1 line) would be to use:
> 'sg->length >= nbytes && sg->offset + nbytes <= PAGE_SIZE'

Or just make this optimization specific to !HIGHMEM, and use nbytes <= sg->length
and sg_virt().

- Eric
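A minimal sketch of that alternative, again reusing the function's locals and assuming the zero-length and virtual-address cases have already been handled; it illustrates the idea rather than reproducing a posted patch.

```c
/*
 * Sketch of the !HIGHMEM-only fast path: without highmem every lowmem
 * page is permanently mapped, so sg_virt() yields a usable address for
 * the whole physically contiguous entry, whatever sg->offset is.
 */
if (!IS_ENABLED(CONFIG_HIGHMEM) && nbytes <= sg->length)
	return crypto_shash_digest(desc, sg_virt(sg), nbytes, req->result);

/* HIGHMEM builds, or data longer than the entry, use the generic walk. */
return crypto_shash_init(desc) ?:
       shash_ahash_finup(req, desc);
```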
On Tue, Mar 11, 2025 at 10:44:31AM -0700, Eric Biggers wrote:
>
> I guess you think this is fixing a bug in the case where sg->offset > PAGE_SIZE?
> Is that case even supported?  It is supposed to be the offset into a page.

Supported? Obviously not since this bug has been there since the
very start :)

But is it conceivable to create such a scatterlist, it certainly
seems to be.  If we were to create a scatterlist from a single
order-n folio, then it's quite reasonable for the offset to be
greater than PAGE_SIZE.

> Even if so, a simpler fix (1 line) would be to use:
> 'sg->length >= nbytes && sg->offset + nbytes <= PAGE_SIZE'

That would just mean more use of the fallback code path.  Not
a big deal but I was feeling generous.

Thanks,
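Purely as an illustration of the scenario Herbert describes (no in-tree caller is claimed to do this, and whether it should be allowed is exactly what is being debated), such an entry could be built roughly like so:

```c
/*
 * Hypothetical construction of a scatterlist entry whose offset exceeds
 * PAGE_SIZE: describe data that starts in the second page of an
 * order-2 folio.  Error handling omitted; illustration only.
 */
struct scatterlist sg;
struct folio *folio = folio_alloc(GFP_KERNEL, 2);	/* 4 contiguous pages */

sg_init_table(&sg, 1);
/* 1024 bytes starting 512 bytes into the folio's second page. */
sg_set_page(&sg, folio_page(folio, 0), 1024, PAGE_SIZE + 512);
```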
On Wed, Mar 12, 2025 at 10:30:06AM +0800, Herbert Xu wrote:
> On Tue, Mar 11, 2025 at 10:44:31AM -0700, Eric Biggers wrote:
> >
> > I guess you think this is fixing a bug in the case where sg->offset > PAGE_SIZE?
> > Is that case even supported?  It is supposed to be the offset into a page.
>
> Supported? Obviously not since this bug has been there since the
> very start :)
>
> But is it conceivable to create such a scatterlist, it certainly
> seems to be.  If we were to create a scatterlist from a single
> order-n folio, then it's quite reasonable for the offset to be
> greater than PAGE_SIZE.

If it needs to work, then it needs to be tested.  Currently the self-tests
always use sg_set_buf(), and thus scatterlist::offset always gets set to a
value less than PAGE_SIZE.

And this is yet more evidence that scatterlist based APIs are just a really
bad idea.  Even 20 years later, there is still no precise definition of what
a scatterlist even is...

> > Even if so, a simpler fix (1 line) would be to use:
> > 'sg->length >= nbytes && sg->offset + nbytes <= PAGE_SIZE'
>
> That would just mean more use of the fallback code path.  Not
> a big deal but I was feeling generous.

My second suggestion, making it conditional on !HIGHMEM and using
nbytes <= sg->length, would also be simple and would reduce the overhead for
!HIGHMEM (rather than increasing it as your patch does, just like patch 1
which had the same problem).  !HIGHMEM is the common case (by far) these
days, of course.

- Eric
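For reference, sg_set_buf() is essentially the following (simplified from include/linux/scatterlist.h, with the CONFIG_DEBUG_SG sanity check omitted), which is why the offset it stores is always below PAGE_SIZE:

```c
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
	/* offset_in_page(buf) is always in [0, PAGE_SIZE). */
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
```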
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9c26175c21a8..75d642897e36 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,6 +16,7 @@
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -79,7 +80,7 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 
 	sg = walk->sg;
 	walk->offset = sg->offset;
-	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+	walk->pg = nth_page(sg_page(walk->sg), walk->offset >> PAGE_SHIFT);
 	walk->offset = offset_in_page(walk->offset);
 	walk->entrylen = sg->length;
 
@@ -201,25 +202,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	unsigned int nbytes = req->nbytes;
 	struct scatterlist *sg;
 	unsigned int offset;
+	struct page *page;
+	void *data;
 	int err;
 
-	if (ahash_request_isvirt(req))
+	if (!nbytes || ahash_request_isvirt(req))
 		return crypto_shash_digest(desc, req->svirt, nbytes,
 					   req->result);
 
-	if (nbytes &&
-	    (sg = req->src, offset = sg->offset,
-	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
-		void *data;
+	sg = req->src;
+	if (nbytes > sg->length)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
 
-		data = kmap_local_page(sg_page(sg));
-		err = crypto_shash_digest(desc, data + offset, nbytes,
-					  req->result);
-		kunmap_local(data);
-	} else
-		err = crypto_shash_init(desc) ?:
-		      shash_ahash_finup(req, desc);
+	page = sg_page(sg);
+	offset = sg->offset;
+	page = nth_page(page, offset >> PAGE_SHIFT);
+	offset = offset_in_page(offset);
 
+	if (!IS_ENABLED(CONFIG_HIGHMEM))
+		return crypto_shash_digest(desc, page_address(page) + offset,
+					   nbytes, req->result);
+
+	if (nbytes > (unsigned int)PAGE_SIZE - offset)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
+
+	data = kmap_local_page(page);
+	err = crypto_shash_digest(desc, data + offset, nbytes,
+				  req->result);
+	kunmap_local(data);
 	return err;
 }
 EXPORT_SYMBOL_GPL(shash_ahash_digest);
Use nth_page instead of adding n to the page pointer.

This also fixes a real bug in shash_ahash_digest where the check for
contiguous hash data may be incorrect in the presence of highmem. This
could result in an incorrect hash or worse.

Fixes: 5f7082ed4f48 ("crypto: hash - Export shash through hash")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ahash.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
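For background, nth_page() differs from plain pointer arithmetic only on SPARSEMEM kernels without VMEMMAP, where the struct page array is not virtually contiguous across memory sections; approximately (paraphrased from include/linux/mm.h, so treat the exact form as an approximation):

```c
/* Approximate definition of nth_page(); the real macro lives in include/linux/mm.h. */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page, n)	((page) + (n))
#endif
```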