crypto: arm64/sha: fix function types

Message ID 20191112223046.176097-1-samitolvanen@google.com (mailing list archive)
State New, archived
Series crypto: arm64/sha: fix function types

Commit Message

Sami Tolvanen Nov. 12, 2019, 10:30 p.m. UTC
Declare assembly functions with the expected function type
instead of casting pointers in C to avoid type mismatch failures
with Control-Flow Integrity (CFI) checking.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
 arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
 arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
 arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
 arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
 5 files changed, 43 insertions(+), 61 deletions(-)


base-commit: 100d46bd72ec689a5582c2f5f4deadc5bcb92d60
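
For readers unfamiliar with the failure mode: Clang's CFI
(-fsanitize=cfi-icall) checks at every indirect call that the target
function was defined with the same function type the call site expects,
so calling through a cast function pointer aborts at run time even when
the struct layouts happen to line up. Below is a minimal, self-contained
userspace sketch of the pattern this series removes (an editorial
illustration, not part of the patch):

struct sha1_state    { unsigned int count; };
struct sha1_ce_state { struct sha1_state sst; unsigned int finalize; };

typedef void sha1_block_fn(struct sha1_state *sst,
			   const unsigned char *src, int blocks);

/* Defined with sha1_ce_state, i.e. NOT matching sha1_block_fn: */
static void sha1_ce_transform(struct sha1_ce_state *sst,
			      const unsigned char *src, int blocks)
{
	sst->sst.count += blocks;	/* updates the base state... */
	sst->finalize = 0;		/* ...and the extended state */
	(void)src;
}

static void do_update(struct sha1_state *sst, sha1_block_fn *block_fn)
{
	block_fn(sst, (const unsigned char *)"", 1);	/* CFI-checked call */
}

int main(void)
{
	struct sha1_ce_state ctx = { { 0 }, 0 };

	/*
	 * The old pattern: the cast compiles cleanly, but a CFI build
	 * rejects the indirect call above because sha1_ce_transform was
	 * not defined with type sha1_block_fn.
	 */
	do_update(&ctx.sst, (sha1_block_fn *)sha1_ce_transform);
	return 0;
}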

Comments

Kees Cook Nov. 13, 2019, 6:27 p.m. UTC | #1
On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

Looks good, yes. This looks very similar to what I needed to do for
x86's SHA routines.

Reviewed-by: Kees Cook <keescook@chromium.org>

Eric Biggers Nov. 13, 2019, 8:04 p.m. UTC | #2
On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
> [...]
> 
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..3153a9bbb683 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -25,7 +25,7 @@ struct sha1_ce_state {
>  	u32			finalize;
>  };
>  
> -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
>  				  int blocks);

Please update the comments in the corresponding assembly files too.

Also, this change doesn't really make sense because the assembly functions still
expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
not present in struct sha1_state.  There should either be wrapper functions that
explicitly do the cast from sha1_state to sha1_ce_state, or there should be
comments in the assembly files that very clearly explain that although the
function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Likewise for SHA-256 and SHA-512.

- Eric
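
A minimal sketch of the first option Eric describes, as it might look in
sha1-ce-glue.c: the assembly routine keeps its honest sha1_ce_state
prototype, and a C shim with the expected sha1_block_fn type performs the
cast at one documented site (the shim name is illustrative, not from this
thread):

asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
				  int blocks);

static void sha1_ce_transform_shim(struct sha1_state *sst, u8 const *src,
				   int blocks)
{
	/*
	 * Callers only ever pass the sst member of a sha1_ce_state, and
	 * sst is its first member, so the cast is valid; the shim gives
	 * CFI an indirect-call target of the expected type.
	 */
	sha1_ce_transform((struct sha1_ce_state *)sst, src, blocks);
}

The sha1_base_do_update() and sha1_base_do_finalize() calls would then
take sha1_ce_transform_shim directly, with no function-pointer casts.
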
Sami Tolvanen Nov. 13, 2019, 10:28 p.m. UTC | #3
On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
>
> On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > Declare assembly functions with the expected function type
> > instead of casting pointers in C to avoid type mismatch failures
> > with Control-Flow Integrity (CFI) checking.
> >
> > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > [...]
> > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> >       u32                     finalize;
> >  };
> >
> > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> >                                 int blocks);
>
> Please update the comments in the corresponding assembly files too.
>
> Also, this change doesn't really make sense because the assembly functions still
> expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> not present in struct sha1_state.  There should either be wrapper functions that
> explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> comments in the assembly files that very clearly explain that although the
> function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Agreed, this needs a comment explaining the type mismatch. I'm also
fine with using wrapper functions and explicitly casting the
parameters instead of changing function declarations. Herbert, Ard,
any preferences?

Sami
Ard Biesheuvel Nov. 14, 2019, 9:45 a.m. UTC | #4
On Wed, 13 Nov 2019 at 22:28, Sami Tolvanen <samitolvanen@google.com> wrote:
>
> On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
> >
> > On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > > Declare assembly functions with the expected function type
> > > instead of casting pointers in C to avoid type mismatch failures
> > > with Control-Flow Integrity (CFI) checking.
> > >
> > > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > > [...]
> > > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > >       u32                     finalize;
> > >  };
> > >
> > > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > >                                 int blocks);
> >
> > Please update the comments in the corresponding assembly files too.
> >
> > Also, this change doesn't really make sense because the assembly functions still
> > expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> > not present in struct sha1_state.  There should either be wrapper functions that
> > explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> > comments in the assembly files that very clearly explain that although the
> > function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.
>
> Agreed, this needs a comment explaining the type mismatch. I'm also
> fine with using wrapper functions and explicitly casting the
> parameters instead of changing function declarations. Herbert, Ard,
> any preferences?
>

I guess the former would be cleaner, using container_of() rather than
a blind cast to make the code more self-documenting. The extra branch
shouldn't really matter.
Sami Tolvanen Nov. 14, 2019, 6:21 p.m. UTC | #5
On Thu, Nov 14, 2019 at 1:45 AM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
>
> On Wed, 13 Nov 2019 at 22:28, Sami Tolvanen <samitolvanen@google.com> wrote:
> >
> > On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
> > >
> > > On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > > > Declare assembly functions with the expected function type
> > > > instead of casting pointers in C to avoid type mismatch failures
> > > > with Control-Flow Integrity (CFI) checking.
> > > >
> > > > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > > > [...]
> > > > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > > >       u32                     finalize;
> > > >  };
> > > >
> > > > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > > > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > > >                                 int blocks);
> > >
> > > Please update the comments in the corresponding assembly files too.
> > >
> > > Also, this change doesn't really make sense because the assembly functions still
> > > expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> > > not present in struct sha1_state.  There should either be wrapper functions that
> > > explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> > > comments in the assembly files that very clearly explain that although the
> > > function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.
> >
> > Agreed, this needs a comment explaining the type mismatch. I'm also
> > fine with using wrapper functions and explicitly casting the
> > parameters instead of changing function declarations. Herbert, Ard,
> > any preferences?
> >
>
> I guess the former would be cleaner, using container_of() rather than
> a blind cast to make the code more self-documenting. The extra branch
> shouldn't really matter.

Sure, using container_of() sounds like a better option; I'll use that
in v2. Thanks!

Sami
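
For reference, a sketch of the container_of() direction agreed above (v2
is not part of this thread, and the double-underscore wrapper name is
illustrative):

static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				int blocks)
{
	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
			  src, blocks);
}

Unlike a blind cast, container_of() states explicitly that sst is
embedded in a sha1_ce_state, and it stays correct even if sst were ever
moved from being the first member.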

Patch

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..3153a9bbb683 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -25,7 +25,7 @@ struct sha1_ce_state {
 	u32			finalize;
 };
 
-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
 				  int blocks);
 
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
@@ -41,8 +41,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a4dacedfe4d4 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -25,7 +25,7 @@ struct sha256_ce_state {
 	u32			finalize;
 };
 
-asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
 				  int blocks);
 
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
@@ -33,7 +33,8 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 						 finalize);
 
-asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+					int blocks);
 
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
@@ -42,12 +43,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +62,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +74,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +86,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..dac3157937ba 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -23,28 +23,25 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha224");
 MODULE_ALIAS_CRYPTO("sha256");
 
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
-					unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+					int blocks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
-asmlinkage void sha256_block_neon(u32 *digest, const void *data,
-				  unsigned int num_blks);
+asmlinkage void sha256_block_neon(struct sha256_state *sst, u8 const *src,
+				  int blocks);
 
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
-	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+	return sha256_base_do_update(desc, data, len, sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 				     unsigned int len, u8 *out)
 {
 	if (len)
-		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_update(desc, data, len, sha256_block_data_order);
+	sha256_base_do_finalize(desc, sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +84,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +100,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +114,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..0f964235d753 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -27,18 +27,18 @@ MODULE_LICENSE("GPL v2");
 asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 				    int blocks);
 
-asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+					int blocks);
 
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+					     sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +50,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+					      sha512_block_data_order);
+		sha512_base_do_finalize(desc, sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +65,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..0f6b610a7954 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,25 +20,22 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
-					unsigned int num_blks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst,
+					u8 const *src, int blocks);
 EXPORT_SYMBOL(sha512_block_data_order);
 
 static int sha512_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
-	return sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
+	return sha512_base_do_update(desc, data, len, sha512_block_data_order);
 }
 
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
 	if (len)
-		sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
-	sha512_base_do_finalize(desc,
-			(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_update(desc, data, len, sha512_block_data_order);
+	sha512_base_do_finalize(desc, sha512_block_data_order);
 
 	return sha512_base_finish(desc, out);
 }