[v2,4/4] crypto: qat - fallback for xts with 192 bit keys

Message ID: 20200626080429.155450-5-giovanni.cabiddu@intel.com (mailing list archive)
State: Superseded
Delegated to: Herbert Xu
Series: crypto: qat - fixes to aes xts

Commit Message

Cabiddu, Giovanni June 26, 2020, 8:04 a.m. UTC
Forward requests to another provider if the key length is 192 bits as
this is not supported by the QAT accelerators.

This fixes the following issue reported by the extra self test:
alg: skcipher: qat_aes_xts setkey failed on test vector "random: len=3204
klen=48"; expected_error=0, actual_error=-22, flags=0x1

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
---
 drivers/crypto/qat/qat_common/qat_algs.c | 67 ++++++++++++++++++++++--
 1 file changed, 64 insertions(+), 3 deletions(-)
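
The "random: len=3204 klen=48" vector in the report above is generated by the
fuzz tests that only run when the extra crypto self tests are enabled, i.e.
with a configuration along these lines (sketch; option names from crypto/Kconfig):

        # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
        CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y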

Comments

Ard Biesheuvel June 26, 2020, 6:15 p.m. UTC | #1
On Fri, 26 Jun 2020 at 10:04, Giovanni Cabiddu
<giovanni.cabiddu@intel.com> wrote:
>
> Forward requests to another provider if the key length is 192 bits as
> this is not supported by the QAT accelerators.
>
> This fixes the following issue reported by the extra self test:
> alg: skcipher: qat_aes_xts setkey failed on test vector "random: len=3204
> klen=48"; expected_error=0, actual_error=-22, flags=0x1
>
> Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
> ---
>  drivers/crypto/qat/qat_common/qat_algs.c | 67 ++++++++++++++++++++++--
>  1 file changed, 64 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
> index 77bdff0118f7..5e8c0b6f2834 100644
> --- a/drivers/crypto/qat/qat_common/qat_algs.c
> +++ b/drivers/crypto/qat/qat_common/qat_algs.c
> @@ -88,6 +88,8 @@ struct qat_alg_skcipher_ctx {
>         struct icp_qat_fw_la_bulk_req enc_fw_req;
>         struct icp_qat_fw_la_bulk_req dec_fw_req;
>         struct qat_crypto_instance *inst;
> +       struct crypto_skcipher *ftfm;
> +       bool fallback;
>  };
>
>  static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
> @@ -994,12 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
>  static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
>                                        const u8 *key, unsigned int keylen)
>  {
> +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
>         int ret;
>
>         ret = xts_verify_key(tfm, key, keylen);
>         if (ret)
>                 return ret;
>
> +       if (keylen >> 1 == AES_KEYSIZE_192) {
> +               ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
> +               if (ret)
> +                       return ret;
> +
> +               ctx->fallback = true;
> +
> +               return 0;
> +       }
> +
> +       ctx->fallback = false;
> +
>         return qat_alg_skcipher_setkey(tfm, key, keylen,
>                                        ICP_QAT_HW_CIPHER_XTS_MODE);
>  }
> @@ -1066,9 +1081,19 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
>
>  static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
>  {
> +       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
> +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
> +       struct skcipher_request *nreq = skcipher_request_ctx(req);
> +
>         if (req->cryptlen < XTS_BLOCK_SIZE)
>                 return -EINVAL;
>
> +       if (ctx->fallback) {
> +               memcpy(nreq, req, sizeof(*req));
> +               skcipher_request_set_tfm(nreq, ctx->ftfm);
> +               return crypto_skcipher_encrypt(nreq);
> +       }
> +
>         return qat_alg_skcipher_encrypt(req);
>  }
>
> @@ -1134,9 +1159,19 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
>
>  static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
>  {
> +       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
> +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
> +       struct skcipher_request *nreq = skcipher_request_ctx(req);
> +
>         if (req->cryptlen < XTS_BLOCK_SIZE)
>                 return -EINVAL;
>
> +       if (ctx->fallback) {
> +               memcpy(nreq, req, sizeof(*req));
> +               skcipher_request_set_tfm(nreq, ctx->ftfm);
> +               return crypto_skcipher_decrypt(nreq);
> +       }
> +
>         return qat_alg_skcipher_decrypt(req);
>  }
>
> @@ -1200,6 +1235,23 @@ static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
>         return 0;
>  }
>
> +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
> +{
> +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> +       int reqsize;
> +
> +       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);

Why are you only permitting synchronous fallbacks? If the logic above
is sound, and copies the base.complete and base.data fields as well,
the fallback can complete asynchronously without problems.

Note that SIMD s/w implementations of XTS(AES) are asynchronous as
well, as they use the crypto_simd helper which queues requests for
asynchronous completion if the context from which the request was
issued does not permit access to the SIMD register file (e.g., softirq
context on some architectures, if the interrupted context is also
using SIMD)
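
For example, a minimal sketch of what the encrypt path could look like with an
asynchronous fallback (the helper name below is made up for illustration; it
reuses the request context already reserved for the fallback request, and
carries the caller's callback over explicitly instead of relying on the
memcpy()):

static int qat_xts_encrypt_fallback_sketch(struct skcipher_request *req)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        /* Run on the fallback tfm, but keep the caller's completion
         * callback, data and flags so an asynchronous fallback can
         * complete the request in the usual way.
         */
        skcipher_request_set_tfm(nreq, ctx->ftfm);
        skcipher_request_set_callback(nreq, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(nreq, req->src, req->dst,
                                   req->cryptlen, req->iv);

        return crypto_skcipher_encrypt(nreq);
}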


> +       if (IS_ERR(ctx->ftfm))
> +               return PTR_ERR(ctx->ftfm);
> +
> +       reqsize = max(sizeof(struct qat_crypto_request),
> +                     sizeof(struct skcipher_request) +
> +                     crypto_skcipher_reqsize(ctx->ftfm));
> +       crypto_skcipher_set_reqsize(tfm, reqsize);
> +
> +       return 0;
> +}
> +
>  static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
>  {
>         struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> @@ -1227,6 +1279,15 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
>         qat_crypto_put_instance(inst);
>  }
>
> +static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
> +{
> +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> +
> +       if (ctx->ftfm)
> +               crypto_free_skcipher(ctx->ftfm);
> +
> +       qat_alg_skcipher_exit_tfm(tfm);
> +}
>
>  static struct aead_alg qat_aeads[] = { {
>         .base = {
> @@ -1321,14 +1382,14 @@ static struct skcipher_alg qat_skciphers[] = { {
>         .base.cra_name = "xts(aes)",
>         .base.cra_driver_name = "qat_aes_xts",
>         .base.cra_priority = 4001,
> -       .base.cra_flags = CRYPTO_ALG_ASYNC,
> +       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
>         .base.cra_blocksize = AES_BLOCK_SIZE,
>         .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
>         .base.cra_alignmask = 0,
>         .base.cra_module = THIS_MODULE,
>
> -       .init = qat_alg_skcipher_init_tfm,
> -       .exit = qat_alg_skcipher_exit_tfm,
> +       .init = qat_alg_skcipher_init_xts_tfm,
> +       .exit = qat_alg_skcipher_exit_xts_tfm,
>         .setkey = qat_alg_skcipher_xts_setkey,
>         .decrypt = qat_alg_skcipher_xts_decrypt,
>         .encrypt = qat_alg_skcipher_xts_encrypt,
> --
> 2.26.2
>
Cabiddu, Giovanni June 29, 2020, 5:04 p.m. UTC | #2
Thanks for your feedback Ard.

On Fri, Jun 26, 2020 at 08:15:16PM +0200, Ard Biesheuvel wrote:
> On Fri, 26 Jun 2020 at 10:04, Giovanni Cabiddu
> <giovanni.cabiddu@intel.com> wrote:
> >
> > +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
> > +{
> > +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> > +       int reqsize;
> > +
> > +       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
> 
> Why are you only permitting synchronous fallbacks? If the logic above
> is sound, and copies the base.complete and base.data fields as well,
> the fallback can complete asynchronously without problems.
> Note that SIMD s/w implementations of XTS(AES) are asynchronous as
> well, as they use the crypto_simd helper which queues requests for
> asynchronous completion if the context from which the request was
> issued does not permit access to the SIMD register file (e.g., softirq
> context on some architectures, if the interrupted context is also
> using SIMD)
I did it this way since I thought I didn't have a way to test it with an
asynchronous sw implementation.
I changed this line to avoid masking the asynchronous implementations
and tested it by forcing simd.c to always use cryptd (I don't know if
there is a simpler way to do it).

Also, I added CRYPTO_ALG_NEED_FALLBACK to the mask so I don't get another
implementation that itself requires a fallback.
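
As a sketch (assuming this is roughly what v3 will look like), the allocation
in qat_alg_skcipher_init_xts_tfm becomes:

        /* Do not mask out asynchronous implementations; only skip
         * implementations that themselves need a fallback.
         */
        ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->ftfm))
                return PTR_ERR(ctx->ftfm);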

I'm going to send a v3.

Regards,

--
Giovanni
Ard Biesheuvel June 30, 2020, 10:09 a.m. UTC | #3
On Mon, 29 Jun 2020 at 19:05, Giovanni Cabiddu
<giovanni.cabiddu@intel.com> wrote:
>
> Thanks for your feedback Ard.
>
> On Fri, Jun 26, 2020 at 08:15:16PM +0200, Ard Biesheuvel wrote:
> > On Fri, 26 Jun 2020 at 10:04, Giovanni Cabiddu
> > <giovanni.cabiddu@intel.com> wrote:
> > >
> > > +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
> > > +{
> > > +       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> > > +       int reqsize;
> > > +
> > > +       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
> >
> > Why are you only permitting synchronous fallbacks? If the logic above
> > is sound, and copies the base.complete and base.data fields as well,
> > the fallback can complete asynchronously without problems.
> > Note that SIMD s/w implementations of XTS(AES) are asynchronous as
> > well, as they use the crypto_simd helper which queues requests for
> > asynchronous completion if the context from which the request was
> > issued does not permit access to the SIMD register file (e.g., softirq
> > context on some architectures, if the interrupted context is also
> > using SIMD)
> I did it this way since I thought I didn't have a way to test it with an
> asynchronous sw implementation.
> I changed this line to avoid masking the asynchronous implementations
> and tested it by forcing simd.c to always use cryptd (I don't know if
> there is a simpler way to do it).
>

This is exactly how I tested it in the past, but note that the
extended testing that Eric implemented will also run from a context
where SIMD is disabled artificially, and so you should be getting this
behavior in any case.

> Also, I added CRYPTO_ALG_NEED_FALLBACK to the mask so I don't get another
> implementation that itself requires a fallback.
>
> I'm going to send a v3.
>
> Regards,
>
> --
> Giovanni