
[2/2] crypto: s5p-sss - Handle unaligned buffers

Message ID 1457259436-32560-2-git-send-email-krzk@kernel.org (mailing list archive)
State Superseded
Delegated to: Herbert Xu

Commit Message

Krzysztof Kozlowski March 6, 2016, 10:17 a.m. UTC
During crypto selftests on Odroid XU3 (Exynos5422) some of the
algorithms failed because the source and destination buffers were not
aligned to the AES block size:

alg: skcipher: encryption failed on chunk test 1 for ecb-aes-s5p: ret=22

Handle such a case by copying the buffers to a new, aligned and
contiguous area.

Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
---
 drivers/crypto/s5p-sss.c | 149 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 137 insertions(+), 12 deletions(-)

Comments

Vladimir Zapolskiy March 7, 2016, 1:28 a.m. UTC | #1
Hi Krzysztof,

On 06.03.2016 12:17, Krzysztof Kozlowski wrote:
> During crypto selftests on Odroid XU3 (Exynos5422) some of the
> algorithms failed because the source and destination buffers were not
> aligned to the AES block size:

Excuse my ignorance, but what are the crypto selftests you reference?
Are they the run-time self-tests run by the crypto manager on algorithm
registration?

> alg: skcipher: encryption failed on chunk test 1 for ecb-aes-s5p: ret=22
> 
> Handle such a case by copying the buffers to a new, aligned and
> contiguous area.

I'm not quite convinced that a particular crypto accelerator driver is
the right place for this change; at least I don't see anything S5P-SSS
specific in it. Still, it might be good to add the change.

Briefly looking at other drivers: atmel-* and omap-* similarly have
slow-path fallbacks, while ./rockchip/rk3288_crypto_ablkcipher.c fails
like s5p-sss does, etc.

Anyway, since it is a valid improvement, please feel free to add my

Acked-by: Vladimir Zapolskiy <vz@mleia.com>

> Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
> ---
>  drivers/crypto/s5p-sss.c | 149 +++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 137 insertions(+), 12 deletions(-)
> 

--
With best wishes,
Vladimir
Krzysztof Kozlowski March 7, 2016, 3:59 a.m. UTC | #2
On 07.03.2016 10:28, Vladimir Zapolskiy wrote:
> Hi Krzysztof,
> 
> On 06.03.2016 12:17, Krzysztof Kozlowski wrote:
>> During crypto selftests on Odroid XU3 (Exynos5422) some of the
>> algorithms failed because the source and destination buffers were not
>> aligned to the AES block size:
> 
> Excuse my ignorance, but what are the crypto selftests you reference?
> Are they the run-time self-tests run by the crypto manager on algorithm
> registration?

Yes, those tests. They are disabled by CRYPTO_MANAGER_DISABLE_TESTS.
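
For reference, a minimal .config sketch (a sketch only; option names as
in the mainline crypto/Kconfig) that makes the crypto manager run those
self-tests at algorithm registration time:

  CONFIG_CRYPTO_MANAGER=y
  # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set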

> 
>> alg: skcipher: encryption failed on chunk test 1 for ecb-aes-s5p: ret=22
>>
>> Handle such a case by copying the buffers to a new, aligned and
>> contiguous area.
> 
> I'm not quite convinced that a particular crypto accelerator driver is
> the right place for this change; at least I don't see anything S5P-SSS
> specific in it. Still, it might be good to add the change.

The driver sets a .cra_alignmask, but the docs say that re-alignment by
the crypto API might not happen. And in fact, for the selftests the data
sometimes came in unaligned, so the driver's alignment checks returned
-EINVAL (the ret=22 above).

On the other hand, CRYPTO_TEST was providing aligned data.
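
For illustration, the alignment hint is advertised roughly like this (a
sketch only; the field values below are assumptions, not copied from
s5p-sss.c):

#include <crypto/algapi.h>
#include <crypto/aes.h>

/*
 * Sketch: a driver advertises its alignment requirement through
 * .cra_alignmask, but it still cannot rely on the scatterlists it
 * receives being aligned to the AES block size.
 */
static struct crypto_alg ecb_aes_alg_sketch = {
	.cra_name        = "ecb(aes)",
	.cra_driver_name = "ecb-aes-s5p",
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_alignmask   = AES_BLOCK_SIZE - 1,	/* 16-byte alignment hint */
	/* remaining fields (.cra_flags, .cra_type, .cra_u, ...) omitted */
};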

> 
> Briefly looking at other drivers: atmel-* and omap-* similarly have
> slow-path fallbacks, while ./rockchip/rk3288_crypto_ablkcipher.c fails
> like s5p-sss does, etc.

Yes, I was kind of inspired by the omap solution.

> Anyway, since it is a valid improvement, please feel free to add my
> 
> Acked-by: Vladimir Zapolskiy <vz@mleia.com>

Thanks!

Krzysztof
> 
>> Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
>> ---
>>  drivers/crypto/s5p-sss.c | 149 +++++++++++++++++++++++++++++++++++++++++++----
>>  1 file changed, 137 insertions(+), 12 deletions(-)
>>


Patch

diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 60f835455a41..bac1f4593f98 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -28,6 +28,7 @@ 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
 
 #define _SBF(s, v)                      ((v) << (s))
 #define _BIT(b)                         _SBF(b, 1)
@@ -185,6 +186,10 @@  struct s5p_aes_dev {
 	struct scatterlist         *sg_src;
 	struct scatterlist         *sg_dst;
 
+	/* In case of unaligned access: */
+	struct scatterlist         *sg_src_cpy;
+	struct scatterlist         *sg_dst_cpy;
+
 	struct tasklet_struct       tasklet;
 	struct crypto_queue         queue;
 	bool                        busy;
@@ -244,8 +249,45 @@  static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
 }
 
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+	int len;
+
+	if (!*sg)
+		return;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+	kfree(*sg);
+	*sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+			    unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
 static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 {
+	if (dev->sg_dst_cpy) {
+		dev_dbg(dev->dev,
+			"Copying %d bytes of output data back to original place\n",
+			dev->req->nbytes);
+		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+				dev->req->nbytes, 1);
+	}
+	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+
 	/* holding a lock outside */
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
@@ -261,14 +303,36 @@  static void s5p_unset_indata(struct s5p_aes_dev *dev)
 	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
 }
 
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+			    struct scatterlist **dst)
+{
+	void *pages;
+	int len;
+
+	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+	if (!*dst)
+		return -ENOMEM;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+	if (!pages) {
+		kfree(*dst);
+		*dst = NULL;
+		return -ENOMEM;
+	}
+
+	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+	sg_init_table(*dst, 1);
+	sg_set_buf(*dst, pages, len);
+
+	return 0;
+}
+
 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
 	if (!sg_dma_len(sg)) {
 		err = -EINVAL;
 		goto exit;
@@ -291,10 +355,6 @@  static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
 	if (!sg_dma_len(sg)) {
 		err = -EINVAL;
 		goto exit;
@@ -394,6 +454,70 @@  static void s5p_set_aes(struct s5p_aes_dev *dev,
 	memcpy_toio(keystart, key, keylen);
 }
 
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+	do {
+		if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE))
+			return false;
+	} while ((sg = sg_next(sg)));
+
+	return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_src_cpy = NULL;
+	sg = req->src;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned source scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_src_cpy;
+	}
+
+	err = s5p_set_indata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_dst_cpy = NULL;
+	sg = req->dst;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned dest scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_dst_cpy;
+	}
+
+	err = s5p_set_outdata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
 	struct ablkcipher_request  *req = dev->req;
@@ -430,19 +554,19 @@  static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
 	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
 
-	err = s5p_set_indata(dev, req->src);
+	err = s5p_set_indata_start(dev, req);
 	if (err)
 		goto indata_error;
 
-	err = s5p_set_outdata(dev, req->dst);
+	err = s5p_set_outdata_start(dev, req);
 	if (err)
 		goto outdata_error;
 
 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
 	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
 
-	s5p_set_dma_indata(dev,  req->src);
-	s5p_set_dma_outdata(dev, req->dst);
+	s5p_set_dma_indata(dev,  dev->sg_src);
+	s5p_set_dma_outdata(dev, dev->sg_dst);
 
 	SSS_WRITE(dev, FCINTENSET,
 		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
@@ -452,6 +576,7 @@  static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 	return;
 
 outdata_error:
+	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_unset_indata(dev);
 
 indata_error: