
[bpf-next,v3,1/2] bpf: add skcipher API support to TC/XDP programs

Message ID 20231031134900.1432945-1-vadfed@meta.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series [bpf-next,v3,1/2] bpf: add skcipher API support to TC/XDP programs

Commit Message

Vadim Fedorenko Oct. 31, 2023, 1:48 p.m. UTC
Add crypto API support to BPF to be able to decrypt or encrypt packets
in TC/XDP BPF programs. Only symmetric key ciphers are supported for
now. Special care has to be taken in the initialization part of the
crypto algorithm, because crypto_alloc_sync_skcipher() doesn't work with
preemption disabled and can therefore only be run in a sleepable BPF
program. Async crypto is not supported either, for the very same reason:
TC/XDP BPF programs are not sleepable.

Signed-off-by: Vadim Fedorenko <vadfed@meta.com>
---
v2 -> v3:
- fix kdoc issues
v1 -> v2:
- use kmalloc in sleepable func, suggested by Alexei
- use __bpf_dynptr_is_rdonly() to check destination, suggested by Jakub
- use __bpf_dynptr_data_ptr() for all dynptr accesses

 include/linux/bpf.h   |   2 +
 kernel/bpf/Makefile   |   3 +
 kernel/bpf/crypto.c   | 280 ++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/helpers.c  |   4 +-
 kernel/bpf/verifier.c |   1 +
 5 files changed, 288 insertions(+), 2 deletions(-)
 create mode 100644 kernel/bpf/crypto.c
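
For readers new to the series, here is a hypothetical end-to-end sketch of how
the kfuncs are meant to be used from BPF. Only the kfunc and dynptr signatures
come from the patch; the map layout, section names, the "ecb(aes)" algorithm
choice and the key handling are illustrative assumptions, not part of the
series:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

/* Kfunc prototypes as proposed by this patch. */
struct bpf_crypto_skcipher_ctx *
bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr *palgo,
			       const struct bpf_dynptr *pkey, int *err) __ksym;
void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx) __ksym;
int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
				const struct bpf_dynptr *src,
				struct bpf_dynptr *dst,
				const struct bpf_dynptr *iv) __ksym;
int bpf_dynptr_from_skb(struct __sk_buff *skb, u64 flags,
			struct bpf_dynptr *ptr) __ksym;

struct ctx_holder {
	struct bpf_crypto_skcipher_ctx __kptr *ctx;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct ctx_holder);
} crypto_map SEC(".maps");

char cipher[16] = "ecb(aes)";	/* NUL-terminated algorithm name */
char raw_key[16];		/* filled in by user space before running */

/* Must be loaded as a sleepable program: ctx_create() may sleep. */
SEC("syscall")
int crypto_setup(void *args)
{
	struct bpf_crypto_skcipher_ctx *cctx, *old;
	struct bpf_dynptr palgo, pkey;
	struct ctx_holder *hold;
	int err = 0, zero = 0;

	bpf_dynptr_from_mem(cipher, sizeof("ecb(aes)"), 0, &palgo);
	bpf_dynptr_from_mem(raw_key, sizeof(raw_key), 0, &pkey);

	cctx = bpf_crypto_skcipher_ctx_create(&palgo, &pkey, &err);
	if (!cctx)
		return err;

	hold = bpf_map_lookup_elem(&crypto_map, &zero);
	if (!hold) {
		bpf_crypto_skcipher_ctx_release(cctx);
		return -1;
	}
	/* Stash the context in the map as a kptr for the datapath. */
	old = bpf_kptr_xchg(&hold->ctx, cctx);
	if (old)
		bpf_crypto_skcipher_ctx_release(old);
	return 0;
}

/* Non-sleepable datapath: only the encrypt/decrypt kfuncs are usable here. */
SEC("tc")
int decrypt_pkt(struct __sk_buff *skb)
{
	struct bpf_crypto_skcipher_ctx *cctx;
	struct bpf_dynptr psrc, piv;
	struct ctx_holder *hold;
	int zero = 0;

	hold = bpf_map_lookup_elem(&crypto_map, &zero);
	if (!hold)
		return TC_ACT_SHOT;
	/* Reading the kptr yields an RCU-protected pointer; the patch adds
	 * bpf_crypto_skcipher_ctx to rcu_protected_types for this reason. */
	cctx = hold->ctx;
	if (!cctx)
		return TC_ACT_SHOT;

	bpf_dynptr_from_skb(skb, 0, &psrc);
	bpf_dynptr_from_mem(raw_key, 0, 0, &piv);	/* ECB: zero-size IV */

	/* Decrypt the packet in place (src == dst). */
	if (bpf_crypto_skcipher_decrypt(cctx, &psrc, &psrc, &piv))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

The split mirrors the commit message: context creation happens once in a
sleepable program, while the per-packet decrypt runs in the non-sleepable TC
datapath.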

Comments

Jiri Olsa Oct. 31, 2023, 3:46 p.m. UTC | #1
On Tue, Oct 31, 2023 at 06:48:59AM -0700, Vadim Fedorenko wrote:

SNIP

> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
> new file mode 100644
> index 000000000000..c2a0703934fc
> --- /dev/null
> +++ b/kernel/bpf/crypto.c
> @@ -0,0 +1,280 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2023 Meta, Inc */
> +#include <linux/bpf.h>
> +#include <linux/bpf_mem_alloc.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> +#include <linux/filter.h>
> +#include <linux/scatterlist.h>
> +#include <linux/skbuff.h>
> +#include <crypto/skcipher.h>
> +
> +/**
> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
> + * @tfm:	The pointer to crypto_sync_skcipher struct.
> + * @rcu:	The RCU head used to free the crypto context with RCU safety.
> + * @usage:	Object reference counter. When the refcount goes to 0, the
> + *		memory is released back to the BPF allocator, which provides
> + *		RCU safety.
> + */
> +struct bpf_crypto_skcipher_ctx {
> +	struct crypto_sync_skcipher *tfm;
> +	struct rcu_head rcu;
> +	refcount_t usage;
> +};
> +
> +__diag_push();
> +__diag_ignore_all("-Wmissing-prototypes",
> +		  "Global kfuncs as their definitions will be in BTF");
> +

hi,
just a heads up.. there's [1] change adding macro for this, it's not
merged yet, but will probably get in soon

jirka

[1] https://lore.kernel.org/bpf/20231030210638.2415306-1-davemarchevsky@fb.com/
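
(For context, the series in [1] adds a pair of wrapper macros around this
boilerplate. Assuming it lands in the proposed form, the
__diag_push()/__diag_ignore_all()/__diag_pop() lines would collapse to:

__bpf_kfunc_start_defs();

/* ... kfunc definitions as below, unchanged ... */

__bpf_kfunc_end_defs();
)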

> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> +{
> +	enum bpf_dynptr_type type;
> +
> +	if (!ptr->data)
> +		return NULL;
> +
> +	type = bpf_dynptr_get_type(ptr);
> +
> +	switch (type) {
> +	case BPF_DYNPTR_TYPE_LOCAL:
> +	case BPF_DYNPTR_TYPE_RINGBUF:
> +		return ptr->data + ptr->offset;
> +	case BPF_DYNPTR_TYPE_SKB:
> +		return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> +	case BPF_DYNPTR_TYPE_XDP:
> +	{
> +		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> +		if (!IS_ERR_OR_NULL(xdp_ptr))
> +			return xdp_ptr;
> +
> +		return NULL;
> +	}
> +	default:
> +		WARN_ONCE(true, "unknown dynptr type %d\n", type);
> +		return NULL;
> +	}
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
> + *
> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
> + * allocator, and will not block. It may return NULL if no memory is available.
> + * @algo: bpf_dynptr which holds string representation of algorithm.
> + * @key:  bpf_dynptr which holds cipher key to do crypto.
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
> +			       const struct bpf_dynptr_kern *pkey, int *err)
> +{
> +	struct bpf_crypto_skcipher_ctx *ctx;
> +	char *algo;
> +
> +	if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
> +		*err = -EINVAL;
> +		return NULL;
> +	}
> +
> +	algo = __bpf_dynptr_data_ptr(palgo);
> +
> +	if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
> +		*err = -EOPNOTSUPP;
> +		return NULL;
> +	}
> +
> +	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> +	if (!ctx) {
> +		*err = -ENOMEM;
> +		return NULL;
> +	}
> +
> +	memset(ctx, 0, sizeof(*ctx));
> +
> +	ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
> +	if (IS_ERR(ctx->tfm)) {
> +		*err = PTR_ERR(ctx->tfm);
> +		ctx->tfm = NULL;
> +		goto err;
> +	}
> +
> +	*err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
> +					   __bpf_dynptr_size(pkey));
> +	if (*err)
> +		goto err;
> +
> +	refcount_set(&ctx->usage, 1);
> +
> +	return ctx;
> +err:
> +	if (ctx->tfm)
> +		crypto_free_sync_skcipher(ctx->tfm);
> +	kfree(ctx);
> +
> +	return NULL;
> +}
> +
> +static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
> +{
> +	struct bpf_crypto_skcipher_ctx *ctx;
> +
> +	ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
> +	crypto_free_sync_skcipher(ctx->tfm);
> +	kfree(ctx);
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
> + *	     pointer.
> + *
> + * Acquires a reference to a BPF crypto context. The context returned by this function
> + * must either be embedded in a map as a kptr, or freed with
> + * bpf_crypto_skcipher_ctx_release().
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
> +{
> +	refcount_inc(&ctx->usage);
> +	return ctx;
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
> + * @ctx: The crypto context being released.
> + *
> + * Releases a previously acquired reference to a BPF crypto context. When the final
> + * reference of the crypto context has been released, it is subsequently freed in
> + * an RCU callback.
> + */
> +__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
> +{
> +	if (refcount_dec_and_test(&ctx->usage))
> +		call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
> +}
> +
> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
> +				     const struct bpf_dynptr_kern *src,
> +				     struct bpf_dynptr_kern *dst,
> +				     const struct bpf_dynptr_kern *iv,
> +				     bool decrypt)
> +{
> +	struct skcipher_request *req = NULL;
> +	struct scatterlist sgin, sgout;
> +	int err;
> +
> +	if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
> +		return -EINVAL;
> +
> +	if (__bpf_dynptr_is_rdonly(dst))
> +		return -EINVAL;
> +
> +	if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
> +		return -EINVAL;
> +
> +	if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
> +		return -EINVAL;
> +
> +	req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
> +	sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
> +
> +	skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
> +				   __bpf_dynptr_data_ptr(iv));
> +
> +	err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
> +
> +	skcipher_request_free(req);
> +
> +	return err;
> +}
> +
> +/**
> + * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
> + * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
> + * @src:	bpf_dynptr to the encrypted data. Must be a trusted pointer.
> + * @dst:	bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
> + * @iv:		bpf_dynptr to IV data to be used by decryptor.
> + *
> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
> +					    const struct bpf_dynptr_kern *src,
> +					    struct bpf_dynptr_kern *dst,
> +					    const struct bpf_dynptr_kern *iv)
> +{
> +	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
> +}
> +
> +/**
> + * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
> + * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
> + * @src:	bpf_dynptr to the plain data. Must be a trusted pointer.
> + * @dst:	bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
> + * @iv:		bpf_dynptr to IV data to be used by encryptor.
> + *
> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
> +					    const struct bpf_dynptr_kern *src,
> +					    struct bpf_dynptr_kern *dst,
> +					    const struct bpf_dynptr_kern *iv)
> +{
> +	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
> +}
> +
> +__diag_pop();
> +
> +BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
> +BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
> +	.owner = THIS_MODULE,
> +	.set   = &crypt_skcipher_init_kfunc_btf_ids,
> +};
> +
> +BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
> +BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
> +	.owner = THIS_MODULE,
> +	.set   = &crypt_skcipher_kfunc_btf_ids,
> +};
> +
> +BTF_ID_LIST(crypto_skcipher_dtor_ids)
> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
> +BTF_ID(func, bpf_crypto_skcipher_ctx_release)
> +
> +static int __init crypto_skcipher_kfunc_init(void)
> +{
> +	int ret;
> +	const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
> +		{
> +			.btf_id	      = crypto_skcipher_dtor_ids[0],
> +			.kfunc_btf_id = crypto_skcipher_dtor_ids[1]
> +		},
> +	};
> +
> +	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
> +					       &crypt_skcipher_init_kfunc_set);
> +	return  ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
> +						   ARRAY_SIZE(crypto_skcipher_dtors),
> +						   THIS_MODULE);
> +}
> +
> +late_initcall(crypto_skcipher_kfunc_init);
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 62a53ebfedf9..2020884fca3d 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -1429,7 +1429,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
>  #define DYNPTR_SIZE_MASK	0xFFFFFF
>  #define DYNPTR_RDONLY_BIT	BIT(31)
>  
> -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
>  {
>  	return ptr->size & DYNPTR_RDONLY_BIT;
>  }
> @@ -1444,7 +1444,7 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
>  	ptr->size |= type << DYNPTR_TYPE_SHIFT;
>  }
>  
> -static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
> +enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
>  {
>  	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
>  }
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index bb58987e4844..75d2f47ca3cb 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -5184,6 +5184,7 @@ BTF_ID(struct, prog_test_ref_kfunc)
>  BTF_ID(struct, cgroup)
>  BTF_ID(struct, bpf_cpumask)
>  BTF_ID(struct, task_struct)
> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
>  BTF_SET_END(rcu_protected_types)
>  
>  static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
> -- 
> 2.39.3
> 
>
Vadim Fedorenko Oct. 31, 2023, 3:58 p.m. UTC | #2
On 31/10/2023 15:46, Jiri Olsa wrote:
> On Tue, Oct 31, 2023 at 06:48:59AM -0700, Vadim Fedorenko wrote:
> 
> SNIP
> 
>> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
>> new file mode 100644
>> index 000000000000..c2a0703934fc
>> --- /dev/null
>> +++ b/kernel/bpf/crypto.c
>> @@ -0,0 +1,280 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/* Copyright (c) 2023 Meta, Inc */
>> +#include <linux/bpf.h>
>> +#include <linux/bpf_mem_alloc.h>
>> +#include <linux/btf.h>
>> +#include <linux/btf_ids.h>
>> +#include <linux/filter.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/skbuff.h>
>> +#include <crypto/skcipher.h>
>> +
>> +/**
>> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
>> + * @tfm:	The pointer to crypto_sync_skcipher struct.
>> + * @rcu:	The RCU head used to free the crypto context with RCU safety.
>> + * @usage:	Object reference counter. When the refcount goes to 0, the
>> + *		memory is released back to the BPF allocator, which provides
>> + *		RCU safety.
>> + */
>> +struct bpf_crypto_skcipher_ctx {
>> +	struct crypto_sync_skcipher *tfm;
>> +	struct rcu_head rcu;
>> +	refcount_t usage;
>> +};
>> +
>> +__diag_push();
>> +__diag_ignore_all("-Wmissing-prototypes",
>> +		  "Global kfuncs as their definitions will be in BTF");
>> +
> 
> hi,
> just a heads up.. there's [1] change adding macro for this, it's not
> merged yet, but will probably get in soon
> 
> jirka
> 
> [1] https://lore.kernel.org/bpf/20231030210638.2415306-1-davemarchevsky@fb.com/
> 

Hi Jiri,

Thanks for the heads up, I'll adjust the patchset once the macro patch is merged.


>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>> +{
>> +	enum bpf_dynptr_type type;
>> +
>> +	if (!ptr->data)
>> +		return NULL;
>> +
>> +	type = bpf_dynptr_get_type(ptr);
>> +
>> +	switch (type) {
>> +	case BPF_DYNPTR_TYPE_LOCAL:
>> +	case BPF_DYNPTR_TYPE_RINGBUF:
>> +		return ptr->data + ptr->offset;
>> +	case BPF_DYNPTR_TYPE_SKB:
>> +		return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
>> +	case BPF_DYNPTR_TYPE_XDP:
>> +	{
>> +		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
>> +		if (!IS_ERR_OR_NULL(xdp_ptr))
>> +			return xdp_ptr;
>> +
>> +		return NULL;
>> +	}
>> +	default:
>> +		WARN_ONCE(true, "unknown dynptr type %d\n", type);
>> +		return NULL;
>> +	}
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and released by
>> + * a BPF program. The crypto context returned by this function must either
>> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
>> + *
>> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
>> + * allocator, and will not block. It may return NULL if no memory is available.
>> + * @algo: bpf_dynptr which holds string representation of algorithm.
>> + * @key:  bpf_dynptr which holds cipher key to do crypto.
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
>> +			       const struct bpf_dynptr_kern *pkey, int *err)
>> +{
>> +	struct bpf_crypto_skcipher_ctx *ctx;
>> +	char *algo;
>> +
>> +	if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
>> +		*err = -EINVAL;
>> +		return NULL;
>> +	}
>> +
>> +	algo = __bpf_dynptr_data_ptr(palgo);
>> +
>> +	if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
>> +		*err = -EOPNOTSUPP;
>> +		return NULL;
>> +	}
>> +
>> +	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>> +	if (!ctx) {
>> +		*err = -ENOMEM;
>> +		return NULL;
>> +	}
>> +
>> +	memset(ctx, 0, sizeof(*ctx));
>> +
>> +	ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
>> +	if (IS_ERR(ctx->tfm)) {
>> +		*err = PTR_ERR(ctx->tfm);
>> +		ctx->tfm = NULL;
>> +		goto err;
>> +	}
>> +
>> +	*err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
>> +					   __bpf_dynptr_size(pkey));
>> +	if (*err)
>> +		goto err;
>> +
>> +	refcount_set(&ctx->usage, 1);
>> +
>> +	return ctx;
>> +err:
>> +	if (ctx->tfm)
>> +		crypto_free_sync_skcipher(ctx->tfm);
>> +	kfree(ctx);
>> +
>> +	return NULL;
>> +}
>> +
>> +static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
>> +{
>> +	struct bpf_crypto_skcipher_ctx *ctx;
>> +
>> +	ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
>> +	crypto_free_sync_skcipher(ctx->tfm);
>> +	kfree(ctx);
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
>> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
>> + *	     pointer.
>> + *
>> + * Acquires a reference to a BPF crypto context. The context returned by this function
>> + * must either be embedded in a map as a kptr, or freed with
>> + * bpf_crypto_skcipher_ctx_release().
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
>> +{
>> +	refcount_inc(&ctx->usage);
>> +	return ctx;
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
>> + * @ctx: The crypto context being released.
>> + *
>> + * Releases a previously acquired reference to a BPF crypto context. When the final
>> + * reference of the crypto context has been released, it is subsequently freed in
>> + * an RCU callback.
>> + */
>> +__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
>> +{
>> +	if (refcount_dec_and_test(&ctx->usage))
>> +		call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
>> +}
>> +
>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>> +				     const struct bpf_dynptr_kern *src,
>> +				     struct bpf_dynptr_kern *dst,
>> +				     const struct bpf_dynptr_kern *iv,
>> +				     bool decrypt)
>> +{
>> +	struct skcipher_request *req = NULL;
>> +	struct scatterlist sgin, sgout;
>> +	int err;
>> +
>> +	if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>> +		return -EINVAL;
>> +
>> +	if (__bpf_dynptr_is_rdonly(dst))
>> +		return -EINVAL;
>> +
>> +	if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>> +		return -EINVAL;
>> +
>> +	if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>> +		return -EINVAL;
>> +
>> +	req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>> +	if (!req)
>> +		return -ENOMEM;
>> +
>> +	sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
>> +	sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
>> +
>> +	skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
>> +				   __bpf_dynptr_data_ptr(iv));
>> +
>> +	err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
>> +
>> +	skcipher_request_free(req);
>> +
>> +	return err;
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
>> + * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
>> + * @src:	bpf_dynptr to the encrypted data. Must be a trusted pointer.
>> + * @dst:	bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
>> + * @iv:		bpf_dynptr to IV data to be used by decryptor.
>> + *
>> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> +					    const struct bpf_dynptr_kern *src,
>> +					    struct bpf_dynptr_kern *dst,
>> +					    const struct bpf_dynptr_kern *iv)
>> +{
>> +	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
>> + * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
>> + * @src:	bpf_dynptr to the plain data. Must be a trusted pointer.
>> + * @dst:	bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
>> + * @iv:		bpf_dynptr to IV data to be used by encryptor.
>> + *
>> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> +					    const struct bpf_dynptr_kern *src,
>> +					    struct bpf_dynptr_kern *dst,
>> +					    const struct bpf_dynptr_kern *iv)
>> +{
>> +	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
>> +}
>> +
>> +__diag_pop();
>> +
>> +BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
>> +BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
>> +
>> +static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
>> +	.owner = THIS_MODULE,
>> +	.set   = &crypt_skcipher_init_kfunc_btf_ids,
>> +};
>> +
>> +BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
>> +BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
>> +
>> +static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
>> +	.owner = THIS_MODULE,
>> +	.set   = &crypt_skcipher_kfunc_btf_ids,
>> +};
>> +
>> +BTF_ID_LIST(crypto_skcipher_dtor_ids)
>> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
>> +BTF_ID(func, bpf_crypto_skcipher_ctx_release)
>> +
>> +static int __init crypto_skcipher_kfunc_init(void)
>> +{
>> +	int ret;
>> +	const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
>> +		{
>> +			.btf_id	      = crypto_skcipher_dtor_ids[0],
>> +			.kfunc_btf_id = crypto_skcipher_dtor_ids[1]
>> +		},
>> +	};
>> +
>> +	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
>> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
>> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
>> +	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
>> +					       &crypt_skcipher_init_kfunc_set);
>> +	return  ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
>> +						   ARRAY_SIZE(crypto_skcipher_dtors),
>> +						   THIS_MODULE);
>> +}
>> +
>> +late_initcall(crypto_skcipher_kfunc_init);
>> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
>> index 62a53ebfedf9..2020884fca3d 100644
>> --- a/kernel/bpf/helpers.c
>> +++ b/kernel/bpf/helpers.c
>> @@ -1429,7 +1429,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
>>   #define DYNPTR_SIZE_MASK	0xFFFFFF
>>   #define DYNPTR_RDONLY_BIT	BIT(31)
>>   
>> -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
>> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
>>   {
>>   	return ptr->size & DYNPTR_RDONLY_BIT;
>>   }
>> @@ -1444,7 +1444,7 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
>>   	ptr->size |= type << DYNPTR_TYPE_SHIFT;
>>   }
>>   
>> -static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
>> +enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
>>   {
>>   	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
>>   }
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index bb58987e4844..75d2f47ca3cb 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -5184,6 +5184,7 @@ BTF_ID(struct, prog_test_ref_kfunc)
>>   BTF_ID(struct, cgroup)
>>   BTF_ID(struct, bpf_cpumask)
>>   BTF_ID(struct, task_struct)
>> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
>>   BTF_SET_END(rcu_protected_types)
>>   
>>   static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
>> -- 
>> 2.39.3
>>
>>
Martin KaFai Lau Nov. 1, 2023, 9:49 p.m. UTC | #3
On 10/31/23 6:48 AM, Vadim Fedorenko wrote:
> --- /dev/null
> +++ b/kernel/bpf/crypto.c
> @@ -0,0 +1,280 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2023 Meta, Inc */
> +#include <linux/bpf.h>
> +#include <linux/bpf_mem_alloc.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> +#include <linux/filter.h>
> +#include <linux/scatterlist.h>
> +#include <linux/skbuff.h>
> +#include <crypto/skcipher.h>
> +
> +/**
> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
> + * @tfm:	The pointer to crypto_sync_skcipher struct.
> + * @rcu:	The RCU head used to free the crypto context with RCU safety.
> + * @usage:	Object reference counter. When the refcount goes to 0, the
> + *		memory is released back to the BPF allocator, which provides
> + *		RCU safety.
> + */
> +struct bpf_crypto_skcipher_ctx {
> +	struct crypto_sync_skcipher *tfm;
> +	struct rcu_head rcu;
> +	refcount_t usage;
> +};
> +
> +__diag_push();
> +__diag_ignore_all("-Wmissing-prototypes",
> +		  "Global kfuncs as their definitions will be in BTF");
> +
> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> +{
> +	enum bpf_dynptr_type type;
> +
> +	if (!ptr->data)
> +		return NULL;
> +
> +	type = bpf_dynptr_get_type(ptr);
> +
> +	switch (type) {
> +	case BPF_DYNPTR_TYPE_LOCAL:
> +	case BPF_DYNPTR_TYPE_RINGBUF:
> +		return ptr->data + ptr->offset;
> +	case BPF_DYNPTR_TYPE_SKB:
> +		return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> +	case BPF_DYNPTR_TYPE_XDP:
> +	{
> +		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));

I suspect what it is doing here (for skb and xdp in particular) is very similar 
to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
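
(A minimal sketch of that suggestion, for reference; it assumes
bpf_dynptr_slice() is callable from this file, which the rest of the thread
shows is the actual sticking point:

/* Hypothetical replacement for __bpf_dynptr_data_ptr(): with a NULL
 * buffer__opt, bpf_dynptr_slice() returns a direct pointer when the data
 * is contiguous and NULL otherwise, covering the LOCAL/RINGBUF/SKB/XDP
 * cases above. */
static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
{
	return bpf_dynptr_slice(ptr, 0, NULL, __bpf_dynptr_size(ptr));
}
)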


> +		if (!IS_ERR_OR_NULL(xdp_ptr))
> +			return xdp_ptr;
> +
> +		return NULL;
> +	}
> +	default:
> +		WARN_ONCE(true, "unknown dynptr type %d\n", type);
> +		return NULL;
> +	}
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
> + *
> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
> + * allocator, and will not block. It may return NULL if no memory is available.
> + * @algo: bpf_dynptr which holds string representation of algorithm.
> + * @key:  bpf_dynptr which holds cipher key to do crypto.
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,

Song's patch on __const_str should help on the palgo (which is a string) also:
https://lore.kernel.org/bpf/20231024235551.2769174-4-song@kernel.org/
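
(A hypothetical sketch of the resulting signature, assuming the "__str"
const-string annotation from Song's series lands as proposed:

__bpf_kfunc struct bpf_crypto_skcipher_ctx *
bpf_crypto_skcipher_ctx_create(const char *algo__str,
			       const struct bpf_dynptr_kern *pkey, int *err);

The verifier would then guarantee algo__str is a NUL-terminated constant
string, so the palgo size check and the __bpf_dynptr_data_ptr(palgo) call
above would no longer be needed.)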

> +			       const struct bpf_dynptr_kern *pkey, int *err)
> +{
> +	struct bpf_crypto_skcipher_ctx *ctx;
> +	char *algo;
> +
> +	if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
> +		*err = -EINVAL;
> +		return NULL;
> +	}
> +
> +	algo = __bpf_dynptr_data_ptr(palgo);
> +
> +	if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
> +		*err = -EOPNOTSUPP;
> +		return NULL;
> +	}
> +
> +	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> +	if (!ctx) {
> +		*err = -ENOMEM;
> +		return NULL;
> +	}
> +
> +	memset(ctx, 0, sizeof(*ctx));

nit. kzalloc()
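
That is, the kmalloc() + memset() pair above collapses into a single zeroing
allocation:

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}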

> +
> +	ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
> +	if (IS_ERR(ctx->tfm)) {
> +		*err = PTR_ERR(ctx->tfm);
> +		ctx->tfm = NULL;
> +		goto err;
> +	}
> +
> +	*err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
> +					   __bpf_dynptr_size(pkey));
> +	if (*err)
> +		goto err;
> +
> +	refcount_set(&ctx->usage, 1);
> +
> +	return ctx;
> +err:
> +	if (ctx->tfm)
> +		crypto_free_sync_skcipher(ctx->tfm);
> +	kfree(ctx);
> +
> +	return NULL;
> +}

[ ... ]

> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
> +				     const struct bpf_dynptr_kern *src,
> +				     struct bpf_dynptr_kern *dst,
> +				     const struct bpf_dynptr_kern *iv,
> +				     bool decrypt)
> +{
> +	struct skcipher_request *req = NULL;
> +	struct scatterlist sgin, sgout;
> +	int err;
> +
> +	if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
> +		return -EINVAL;
> +
> +	if (__bpf_dynptr_is_rdonly(dst))
> +		return -EINVAL;
> +
> +	if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
> +		return -EINVAL;
> +
> +	if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
> +		return -EINVAL;
> +
> +	req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);

Doing alloc per packet may kill performance. Is it possible to optimize it 
somehow? What is the usual size of the req (e.g. the example in the selftest)?

> +	if (!req)
> +		return -ENOMEM;
> +
> +	sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
> +	sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
> +
> +	skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
> +				   __bpf_dynptr_data_ptr(iv));
> +
> +	err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);

I am hitting this with the selftest in patch 2:

[   39.806675] =============================
[   39.807243] WARNING: suspicious RCU usage
[   39.807825] 6.6.0-rc7-02091-g17e023652cc1 #336 Tainted: G           O
[   39.808798] -----------------------------
[   39.809368] kernel/sched/core.c:10149 Illegal context switch in RCU-bh 
read-side critical section!
[   39.810589]
[   39.810589] other info that might help us debug this:
[   39.810589]
[   39.811696]
[   39.811696] rcu_scheduler_active = 2, debug_locks = 1
[   39.812616] 2 locks held by test_progs/1769:
[   39.813249]  #0: ffffffff84dce140 (rcu_read_lock){....}-{1:3}, at: 
ip6_finish_output2+0x625/0x1b70
[   39.814539]  #1: ffffffff84dce1a0 (rcu_read_lock_bh){....}-{1:3}, at: 
__dev_queue_xmit+0x1df/0x2c40
[   39.815813]
[   39.815813] stack backtrace:
[   39.816441] CPU: 1 PID: 1769 Comm: test_progs Tainted: G           O 
6.6.0-rc7-02091-g17e023652cc1 #336
[   39.817774] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 
rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[   39.819312] Call Trace:
[   39.819655]  <TASK>
[   39.819967]  dump_stack_lvl+0x130/0x1d0
[   39.822669]  dump_stack+0x14/0x20
[   39.823136]  lockdep_rcu_suspicious+0x220/0x350
[   39.823777]  __might_resched+0xe0/0x660
[   39.827915]  __might_sleep+0x89/0xf0
[   39.828423]  skcipher_walk_virt+0x55/0x120
[   39.828990]  crypto_ecb_decrypt+0x159/0x310
[   39.833846]  crypto_skcipher_decrypt+0xa0/0xd0
[   39.834481]  bpf_crypto_skcipher_crypt+0x29a/0x3c0
[   39.837100]  bpf_crypto_skcipher_decrypt+0x56/0x70
[   39.837770]  bpf_prog_fa576505ab32d186_decrypt_sanity+0x180/0x185
[   39.838627]  cls_bpf_classify+0x3b6/0xf80
[   39.839807]  tcf_classify+0x2f4/0x550

> +
> +	skcipher_request_free(req);
> +
> +	return err;
> +}
> +
Vadim Fedorenko Nov. 1, 2023, 10:50 p.m. UTC | #4
On 01/11/2023 21:49, Martin KaFai Lau wrote:
> On 10/31/23 6:48 AM, Vadim Fedorenko wrote:
>> --- /dev/null
>> +++ b/kernel/bpf/crypto.c
>> @@ -0,0 +1,280 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/* Copyright (c) 2023 Meta, Inc */
>> +#include <linux/bpf.h>
>> +#include <linux/bpf_mem_alloc.h>
>> +#include <linux/btf.h>
>> +#include <linux/btf_ids.h>
>> +#include <linux/filter.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/skbuff.h>
>> +#include <crypto/skcipher.h>
>> +
>> +/**
>> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher 
>> context structure
>> + * @tfm:    The pointer to crypto_sync_skcipher struct.
>> + * @rcu:    The RCU head used to free the crypto context with RCU 
>> safety.
>> + * @usage:    Object reference counter. When the refcount goes to 0, the
>> + *        memory is released back to the BPF allocator, which provides
>> + *        RCU safety.
>> + */
>> +struct bpf_crypto_skcipher_ctx {
>> +    struct crypto_sync_skcipher *tfm;
>> +    struct rcu_head rcu;
>> +    refcount_t usage;
>> +};
>> +
>> +__diag_push();
>> +__diag_ignore_all("-Wmissing-prototypes",
>> +          "Global kfuncs as their definitions will be in BTF");
>> +
>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>> +{
>> +    enum bpf_dynptr_type type;
>> +
>> +    if (!ptr->data)
>> +        return NULL;
>> +
>> +    type = bpf_dynptr_get_type(ptr);
>> +
>> +    switch (type) {
>> +    case BPF_DYNPTR_TYPE_LOCAL:
>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>> +        return ptr->data + ptr->offset;
>> +    case BPF_DYNPTR_TYPE_SKB:
>> +        return skb_pointer_if_linear(ptr->data, ptr->offset, 
>> __bpf_dynptr_size(ptr));
>> +    case BPF_DYNPTR_TYPE_XDP:
>> +    {
>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, 
>> __bpf_dynptr_size(ptr));
> 
> I suspect what it is doing here (for skb and xdp in particular) is very 
> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, 
> NULL, sz) will work.
> 

Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
bpf_kfunc. Should I refactor the code to use it in both places? Like
create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?

> 
>> +        if (!IS_ERR_OR_NULL(xdp_ptr))
>> +            return xdp_ptr;
>> +
>> +        return NULL;
>> +    }
>> +    default:
>> +        WARN_ONCE(true, "unknown dynptr type %d\n", type);
>> +        return NULL;
>> +    }
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto 
>> context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and 
>> released by
>> + * a BPF program. The crypto context returned by this function must 
>> either
>> + * be embedded in a map as a kptr, or freed with 
>> bpf_crypto_skcipher_ctx_release().
>> + *
>> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF 
>> memory
>> + * allocator, and will not block. It may return NULL if no memory is 
>> available.
>> + * @algo: bpf_dynptr which holds string representation of algorithm.
>> + * @key:  bpf_dynptr which holds cipher key to do crypto.
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
> 
> Song's patch on __const_str should help on the palgo (which is a string) 
> also:
> https://lore.kernel.org/bpf/20231024235551.2769174-4-song@kernel.org/
> 

Got it, I'll update it.

>> +                   const struct bpf_dynptr_kern *pkey, int *err)
>> +{
>> +    struct bpf_crypto_skcipher_ctx *ctx;
>> +    char *algo;
>> +
>> +    if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
>> +        *err = -EINVAL;
>> +        return NULL;
>> +    }
>> +
>> +    algo = __bpf_dynptr_data_ptr(palgo);
>> +
>> +    if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, 
>> CRYPTO_ALG_TYPE_MASK)) {
>> +        *err = -EOPNOTSUPP;
>> +        return NULL;
>> +    }
>> +
>> +    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>> +    if (!ctx) {
>> +        *err = -ENOMEM;
>> +        return NULL;
>> +    }
>> +
>> +    memset(ctx, 0, sizeof(*ctx));
> 
> nit. kzalloc()
> 
>> +
>> +    ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
>> +    if (IS_ERR(ctx->tfm)) {
>> +        *err = PTR_ERR(ctx->tfm);
>> +        ctx->tfm = NULL;
>> +        goto err;
>> +    }
>> +
>> +    *err = crypto_sync_skcipher_setkey(ctx->tfm, 
>> __bpf_dynptr_data_ptr(pkey),
>> +                       __bpf_dynptr_size(pkey));
>> +    if (*err)
>> +        goto err;
>> +
>> +    refcount_set(&ctx->usage, 1);
>> +
>> +    return ctx;
>> +err:
>> +    if (ctx->tfm)
>> +        crypto_free_sync_skcipher(ctx->tfm);
>> +    kfree(ctx);
>> +
>> +    return NULL;
>> +}
> 
> [ ... ]
> 
>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>> +                     const struct bpf_dynptr_kern *src,
>> +                     struct bpf_dynptr_kern *dst,
>> +                     const struct bpf_dynptr_kern *iv,
>> +                     bool decrypt)
>> +{
>> +    struct skcipher_request *req = NULL;
>> +    struct scatterlist sgin, sgout;
>> +    int err;
>> +
>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>> +        return -EINVAL;
>> +
>> +    if (__bpf_dynptr_is_rdonly(dst))
>> +        return -EINVAL;
>> +
>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>> +        return -EINVAL;
>> +
>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>> +        return -EINVAL;
>> +
>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
> 
> Doing alloc per packet may kill performance. Is it possible to optimize 
> it somehow? What is the usual size of the req (e.g. the example in the 
> selftest)?
> 

In the ktls code an aead_request is allocated every time encryption is invoked,
see tls_decrypt_sg(), apparently per skb. It doesn't look like a performance
killer. For the selftest it's only sizeof(struct skcipher_request).

>> +    if (!req)
>> +        return -ENOMEM;
>> +
>> +    sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), 
>> __bpf_dynptr_size(src));
>> +    sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), 
>> __bpf_dynptr_size(dst));
>> +
>> +    skcipher_request_set_crypt(req, &sgin, &sgout, 
>> __bpf_dynptr_size(src),
>> +                   __bpf_dynptr_data_ptr(iv));
>> +
>> +    err = decrypt ? crypto_skcipher_decrypt(req) : 
>> crypto_skcipher_encrypt(req);
> 
> I am hitting this with the selftest in patch 2:
> 
> [   39.806675] =============================
> [   39.807243] WARNING: suspicious RCU usage
> [   39.807825] 6.6.0-rc7-02091-g17e023652cc1 #336 Tainted: G           O
> [   39.808798] -----------------------------
> [   39.809368] kernel/sched/core.c:10149 Illegal context switch in 
> RCU-bh read-side critical section!
> [   39.810589]
> [   39.810589] other info that might help us debug this:
> [   39.810589]
> [   39.811696]
> [   39.811696] rcu_scheduler_active = 2, debug_locks = 1
> [   39.812616] 2 locks held by test_progs/1769:
> [   39.813249]  #0: ffffffff84dce140 (rcu_read_lock){....}-{1:3}, at: 
> ip6_finish_output2+0x625/0x1b70
> [   39.814539]  #1: ffffffff84dce1a0 (rcu_read_lock_bh){....}-{1:3}, at: 
> __dev_queue_xmit+0x1df/0x2c40
> [   39.815813]
> [   39.815813] stack backtrace:
> [   39.816441] CPU: 1 PID: 1769 Comm: test_progs Tainted: G           O 
> 6.6.0-rc7-02091-g17e023652cc1 #336
> [   39.817774] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), 
> BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
> [   39.819312] Call Trace:
> [   39.819655]  <TASK>
> [   39.819967]  dump_stack_lvl+0x130/0x1d0
> [   39.822669]  dump_stack+0x14/0x20
> [   39.823136]  lockdep_rcu_suspicious+0x220/0x350
> [   39.823777]  __might_resched+0xe0/0x660
> [   39.827915]  __might_sleep+0x89/0xf0
> [   39.828423]  skcipher_walk_virt+0x55/0x120
> [   39.828990]  crypto_ecb_decrypt+0x159/0x310
> [   39.833846]  crypto_skcipher_decrypt+0xa0/0xd0
> [   39.834481]  bpf_crypto_skcipher_crypt+0x29a/0x3c0
> [   39.837100]  bpf_crypto_skcipher_decrypt+0x56/0x70
> [   39.837770]  bpf_prog_fa576505ab32d186_decrypt_sanity+0x180/0x185
> [   39.838627]  cls_bpf_classify+0x3b6/0xf80
> [   39.839807]  tcf_classify+0x2f4/0x550
> 

That's odd. skcipher_walk_virt() checks for SKCIPHER_WALK_SLEEP, which
depends on the CRYPTO_TFM_REQ_MAY_SLEEP flag of the tfm, and that shouldn't
be set for crypto_alloc_sync_skcipher(). I think we need some crypto@ folks
to jump in and explain.

David, Herbert could you please take a look at the patchset to confirm
that crypto API is used properly.
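
(One possible explanation, offered here as an assumption rather than a finding
from this thread: skcipher_request_alloc() only kmalloc()s the request and
sets the tfm, leaving req->base.flags uninitialized, so a stale
CRYPTO_TFM_REQ_MAY_SLEEP bit can make the walk believe sleeping is allowed.
Initializing the flags explicitly would rule that out:

	req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	/* Initialize req->base.flags: 0 clears CRYPTO_TFM_REQ_MAY_SLEEP,
	 * and no completion callback is needed for a sync tfm. */
	skcipher_request_set_callback(req, 0, NULL, NULL);
)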

>> +
>> +    skcipher_request_free(req);
>> +
>> +    return err;
>> +}
>> +
> 
>
Martin KaFai Lau Nov. 1, 2023, 10:59 p.m. UTC | #5
On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>>> +                     const struct bpf_dynptr_kern *src,
>>> +                     struct bpf_dynptr_kern *dst,
>>> +                     const struct bpf_dynptr_kern *iv,
>>> +                     bool decrypt)
>>> +{
>>> +    struct skcipher_request *req = NULL;
>>> +    struct scatterlist sgin, sgout;
>>> +    int err;
>>> +
>>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>>> +        return -EINVAL;
>>> +
>>> +    if (__bpf_dynptr_is_rdonly(dst))
>>> +        return -EINVAL;
>>> +
>>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>>> +        return -EINVAL;
>>> +
>>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>>> +        return -EINVAL;
>>> +
>>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>>
>> Doing alloc per packet may kill performance. Is it possible to optimize it 
>> somehow? What is the usual size of the req (e.g. the example in the selftest)?
>>
> 
> In the ktls code an aead_request is allocated every time encryption is invoked,
> see tls_decrypt_sg(), apparently per skb. It doesn't look like a performance
> killer. For the selftest it's only sizeof(struct skcipher_request).

ktls is doing the en/decrypt on userspace's behalf, which compensates for the cost.

When this kfunc is used in xdp to decrypt a few bytes for each packet and then 
XDP_TX out, this extra alloc will be quite noticeable. If the size is usually 
small, can it be done in the stack memory?
Martin KaFai Lau Nov. 1, 2023, 11:41 p.m. UTC | #6
On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>> +{
>>> +    enum bpf_dynptr_type type;
>>> +
>>> +    if (!ptr->data)
>>> +        return NULL;
>>> +
>>> +    type = bpf_dynptr_get_type(ptr);
>>> +
>>> +    switch (type) {
>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>> +        return ptr->data + ptr->offset;
>>> +    case BPF_DYNPTR_TYPE_SKB:
>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset, 
>>> __bpf_dynptr_size(ptr));
>>> +    case BPF_DYNPTR_TYPE_XDP:
>>> +    {
>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, 
>>> __bpf_dynptr_size(ptr));
>>
>> I suspect what it is doing here (for skb and xdp in particular) is very 
>> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL, 
>> sz) will work.
>>
> 
> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
> bpf_kfunc. Should I refactor the code to use it in both places? Like

Sorry, scrolled too fast in my earlier reply :(

I am not aware of this limitation. What error does it have?
The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice() kfunc.

> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?
Vadim Fedorenko Nov. 2, 2023, 12:31 a.m. UTC | #7
On 01.11.2023 22:59, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>>>> +                     const struct bpf_dynptr_kern *src,
>>>> +                     struct bpf_dynptr_kern *dst,
>>>> +                     const struct bpf_dynptr_kern *iv,
>>>> +                     bool decrypt)
>>>> +{
>>>> +    struct skcipher_request *req = NULL;
>>>> +    struct scatterlist sgin, sgout;
>>>> +    int err;
>>>> +
>>>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (__bpf_dynptr_is_rdonly(dst))
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>>>> +        return -EINVAL;
>>>> +
>>>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>>>
>>> Doing alloc per packet may kill performance. Is it possible to optimize it 
>>> somehow? What is the usual size of the req (e.g. the example in the selftest)?
>>>
>>
>> In the ktls code an aead_request is allocated every time encryption is invoked,
>> see tls_decrypt_sg(), apparently per skb. It doesn't look like a performance
>> killer. For the selftest it's only sizeof(struct skcipher_request).
> 
> ktls is doing the en/decrypt on userspace's behalf, which compensates for the cost.
> 
> When this kfunc is used in xdp to decrypt a few bytes for each packet and then 
> XDP_TX out, this extra alloc will be quite noticeable. If the size is usually 
> small, can it be done in the stack memory?

Hmm... looks like SYNC_SKCIPHER_REQUEST_ON_STACK will help us. Ok, I'll change 
this part to use stack allocations.
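
(A sketch of what that change might look like inside
bpf_crypto_skcipher_crypt(), using the on-stack request API; variable names as
in the patch above:

	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);	/* no per-packet kmalloc */

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
	sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));

	skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
				   __bpf_dynptr_data_ptr(iv));

	err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);	/* wipe request state from the stack */

SYNC_SKCIPHER_REQUEST_ON_STACK() declares a request sized for sync skciphers
directly on the stack, so the GFP_ATOMIC allocation and its failure path
disappear.)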
Vadim Fedorenko Nov. 2, 2023, 12:38 a.m. UTC | #8
On 01.11.2023 23:41, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>> +{
>>>> +    enum bpf_dynptr_type type;
>>>> +
>>>> +    if (!ptr->data)
>>>> +        return NULL;
>>>> +
>>>> +    type = bpf_dynptr_get_type(ptr);
>>>> +
>>>> +    switch (type) {
>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>>> +        return ptr->data + ptr->offset;
>>>> +    case BPF_DYNPTR_TYPE_SKB:
>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset, 
>>>> __bpf_dynptr_size(ptr));
>>>> +    case BPF_DYNPTR_TYPE_XDP:
>>>> +    {
>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, 
>>>> __bpf_dynptr_size(ptr));
>>>
>>> I suspect what it is doing here (for skb and xdp in particular) is very 
>>> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL, 
>>> sz) will work.
>>>
>>
>> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
>> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
>> bpf_kfunc. Should I refactor the code to use it in both places? Like
> 
> Sorry, scrolled too fast in my earlier reply :(
> 
> I am not aware of this limitation. What error does it have?
> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice() kfunc.
> 

Yeah, but they are in the same module. We do not declare prototypes of kfuncs in
linux/bpf.h and that's why we cannot use them outside of helpers.c.
The same problem was with bpf_dynptr_is_rdonly() I believe.
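
(So the prerequisite change is essentially a one-line prototype in a shared
header; a hedged sketch, mirroring what Song's series linked below does:

/* include/linux/bpf.h */
void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
		       void *buffer__opt, u32 buffer__szk);
)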

>> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?
> 
> 
> 
> 
> 
>
Vadim Fedorenko Nov. 2, 2023, 1:44 p.m. UTC | #9
On 01/11/2023 23:41, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>> +{
>>>> +    enum bpf_dynptr_type type;
>>>> +
>>>> +    if (!ptr->data)
>>>> +        return NULL;
>>>> +
>>>> +    type = bpf_dynptr_get_type(ptr);
>>>> +
>>>> +    switch (type) {
>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>>> +        return ptr->data + ptr->offset;
>>>> +    case BPF_DYNPTR_TYPE_SKB:
>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset, 
>>>> __bpf_dynptr_size(ptr));
>>>> +    case BPF_DYNPTR_TYPE_XDP:
>>>> +    {
>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, 
>>>> __bpf_dynptr_size(ptr));
>>>
>>> I suspect what it is doing here (for skb and xdp in particular) is 
>>> very similar to bpf_dynptr_slice. Please check if 
>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
>>>
>>
>> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
>> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
>> bpf_kfunc. Should I refactor the code to use it in both places? Like
> 
> Sorry, scrolled too fast in my earlier reply :(
> 
> I am not aware of this limitation. What error does it have?
> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice() 
> kfunc.
> 
>> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?

Apparently Song has a patch to expose these bpf_dynptr_slice* functions
to in-kernel users.

https://lore.kernel.org/bpf/20231024235551.2769174-2-song@kernel.org/

Should I wait for it to be merged before sending next version?
Alexei Starovoitov Nov. 2, 2023, 3:36 p.m. UTC | #10
On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
<vadim.fedorenko@linux.dev> wrote:
>
> On 01/11/2023 23:41, Martin KaFai Lau wrote:
> > On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
> >>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> >>>> +{
> >>>> +    enum bpf_dynptr_type type;
> >>>> +
> >>>> +    if (!ptr->data)
> >>>> +        return NULL;
> >>>> +
> >>>> +    type = bpf_dynptr_get_type(ptr);
> >>>> +
> >>>> +    switch (type) {
> >>>> +    case BPF_DYNPTR_TYPE_LOCAL:
> >>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
> >>>> +        return ptr->data + ptr->offset;
> >>>> +    case BPF_DYNPTR_TYPE_SKB:
> >>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
> >>>> __bpf_dynptr_size(ptr));
> >>>> +    case BPF_DYNPTR_TYPE_XDP:
> >>>> +    {
> >>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
> >>>> __bpf_dynptr_size(ptr));
> >>>
> >>> I suspect what it is doing here (for skb and xdp in particular) is
> >>> very similar to bpf_dynptr_slice. Please check if
> >>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
> >>>
> >>
> >> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
> >> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
> >> bpf_kfunc. Should I refactor the code to use it in both places? Like
> >
> > Sorry, scrolled too fast in my earlier reply :(
> >
> > I am not aware of this limitation. What error does it have?
> > The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
> > kfunc.
> >
> >> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?
>
> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
> to in-kernel users.
>
> https://lore.kernel.org/bpf/20231024235551.2769174-2-song@kernel.org/
>
> Should I wait for it to be merged before sending next version?

If you need something from another developer it's best to ask them
explicitly :)
In this case Song can respin with just that change that you need.
Vadim Fedorenko Nov. 2, 2023, 4:14 p.m. UTC | #11
On 02/11/2023 15:36, Alexei Starovoitov wrote:
> On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
> <vadim.fedorenko@linux.dev> wrote:
>>
>> On 01/11/2023 23:41, Martin KaFai Lau wrote:
>>> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>>>> +{
>>>>>> +    enum bpf_dynptr_type type;
>>>>>> +
>>>>>> +    if (!ptr->data)
>>>>>> +        return NULL;
>>>>>> +
>>>>>> +    type = bpf_dynptr_get_type(ptr);
>>>>>> +
>>>>>> +    switch (type) {
>>>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>>>>> +        return ptr->data + ptr->offset;
>>>>>> +    case BPF_DYNPTR_TYPE_SKB:
>>>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
>>>>>> __bpf_dynptr_size(ptr));
>>>>>> +    case BPF_DYNPTR_TYPE_XDP:
>>>>>> +    {
>>>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>>>>>> __bpf_dynptr_size(ptr));
>>>>>
>>>>> I suspect what it is doing here (for skb and xdp in particular) is
>>>>> very similar to bpf_dynptr_slice. Please check if
>>>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
>>>>>
>>>>
>>>> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
>>>> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
>>>> bpf_kfunc. Should I refactor the code to use it in both places? Like
>>>
>>> Sorry, scrolled too fast in my earlier reply :(
>>>
>>> I am not aware of this limitation. What error does it have?
>>> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
>>> kfunc.
>>>
>>>> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?
>>
>> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
>> to in-kernel users.
>>
>> https://lore.kernel.org/bpf/20231024235551.2769174-2-song@kernel.org/
>>
>> Should I wait for it to be merged before sending next version?
> 
> If you need something from another developer it's best to ask them
> explicitly :)
> In this case Song can respin with just that change that you need.

Got it. I actually need 2 different changes from the same patchset, I'll 
ping Song in the appropriate thread, thanks!
Andrii Nakryiko Nov. 2, 2023, 5:47 p.m. UTC | #12
On Thu, Nov 2, 2023 at 9:14 AM Vadim Fedorenko
<vadim.fedorenko@linux.dev> wrote:
>
> On 02/11/2023 15:36, Alexei Starovoitov wrote:
> > On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
> > <vadim.fedorenko@linux.dev> wrote:
> >>
> >> On 01/11/2023 23:41, Martin KaFai Lau wrote:
> >>> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
> >>>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> >>>>>> +{
> >>>>>> +    enum bpf_dynptr_type type;
> >>>>>> +
> >>>>>> +    if (!ptr->data)
> >>>>>> +        return NULL;
> >>>>>> +
> >>>>>> +    type = bpf_dynptr_get_type(ptr);
> >>>>>> +
> >>>>>> +    switch (type) {
> >>>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
> >>>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
> >>>>>> +        return ptr->data + ptr->offset;
> >>>>>> +    case BPF_DYNPTR_TYPE_SKB:
> >>>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
> >>>>>> __bpf_dynptr_size(ptr));
> >>>>>> +    case BPF_DYNPTR_TYPE_XDP:
> >>>>>> +    {
> >>>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
> >>>>>> __bpf_dynptr_size(ptr));
> >>>>>
> >>>>> I suspect what it is doing here (for skb and xdp in particular) is
> >>>>> very similar to bpf_dynptr_slice. Please check if
> >>>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
> >>>>>
> >>>>
> >>>> Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
> >>>> that bpf_dynptr_slice is a bpf_kfunc which cannot be used in another
> >>>> bpf_kfunc. Should I refactor the code to use it in both places? Like
> >>>
> >>> Sorry, scrolled too fast in my earlier reply :(
> >>>
> >>> I am not aware of this limitation. What error does it have?
> >>> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
> >>> kfunc.
> >>>
> >>>> create a __bpf_dynptr_slice() which will be an internal part of the bpf_kfunc?
> >>
> >> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
> >> to in-kernel users.
> >>
> >> https://lore.kernel.org/bpf/20231024235551.2769174-2-song@kernel.org/
> >>
> >> Should I wait for it to be merged before sending next version?
> >
> > If you need something from another developer it's best to ask them
> > explicitly :)
> > In this case Song can respin with just that change that you need.
>
> Got it. I actually need 2 different changes from the same patchset, I'll
> ping Song in the appropriate thread, thanks!
>

Please also check my ramblings in [0]

  [0] https://patchwork.kernel.org/project/netdevbpf/patch/20231024235551.2769174-2-song@kernel.org/

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d3c51a507508..5ad1e83394b3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1222,6 +1222,8 @@  enum bpf_dynptr_type {
 
 int bpf_dynptr_check_size(u32 size);
 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
 
 #ifdef CONFIG_BPF_JIT
 int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f526b7573e97..e14b5834c477 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -41,6 +41,9 @@  obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
 obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
 obj-${CONFIG_BPF_LSM} += bpf_lsm.o
 endif
+ifeq ($(CONFIG_CRYPTO_SKCIPHER),y)
+obj-$(CONFIG_BPF_SYSCALL) += crypto.o
+endif
 obj-$(CONFIG_BPF_PRELOAD) += preload/
 
 obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
new file mode 100644
index 000000000000..c2a0703934fc
--- /dev/null
+++ b/kernel/bpf/crypto.c
@@ -0,0 +1,280 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Meta, Inc */
+#include <linux/bpf.h>
+#include <linux/bpf_mem_alloc.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <crypto/skcipher.h>
+
+/**
+ * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
+ * @tfm:	The pointer to crypto_sync_skcipher struct.
+ * @rcu:	The RCU head used to free the crypto context with RCU safety.
+ * @usage:	Object reference counter. When the refcount goes to 0, the
+ *		memory is freed with kfree() from an RCU callback, which
+ *		provides RCU safety.
+ */
+struct bpf_crypto_skcipher_ctx {
+	struct crypto_sync_skcipher *tfm;
+	struct rcu_head rcu;
+	refcount_t usage;
+};
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+		  "Global kfuncs as their definitions will be in BTF");
+
+static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
+{
+	enum bpf_dynptr_type type;
+
+	if (!ptr->data)
+		return NULL;
+
+	type = bpf_dynptr_get_type(ptr);
+
+	switch (type) {
+	case BPF_DYNPTR_TYPE_LOCAL:
+	case BPF_DYNPTR_TYPE_RINGBUF:
+		return ptr->data + ptr->offset;
+	case BPF_DYNPTR_TYPE_SKB:
+		return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
+	case BPF_DYNPTR_TYPE_XDP:
+	{
+		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
+		if (!IS_ERR_OR_NULL(xdp_ptr))
+			return xdp_ptr;
+
+		return NULL;
+	}
+	default:
+		WARN_ONCE(true, "unknown dynptr type %d\n", type);
+		return NULL;
+	}
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
+ * @palgo: bpf_dynptr which holds the string representation of the algorithm.
+ * @pkey:  bpf_dynptr which holds the cipher key.
+ * @err:   Pointer to an integer where the error code is stored on failure.
+ *
+ * Allocates a crypto context that can be used, acquired, and released by
+ * a BPF program. The crypto context returned by this function must either
+ * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
+ *
+ * bpf_crypto_skcipher_ctx_create() allocates memory with kmalloc(GFP_KERNEL)
+ * and may sleep, so it can only be called from a sleepable BPF program. It
+ * returns NULL and sets *err when allocation or cipher setup fails.
+ */
+__bpf_kfunc struct bpf_crypto_skcipher_ctx *
+bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
+			       const struct bpf_dynptr_kern *pkey, int *err)
+{
+	struct bpf_crypto_skcipher_ctx *ctx;
+	char *algo;
+
+	if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
+		*err = -EINVAL;
+		return NULL;
+	}
+
+	algo = __bpf_dynptr_data_ptr(palgo);
+
+	if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
+		*err = -EOPNOTSUPP;
+		return NULL;
+	}
+
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
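+	/* crypto_alloc_sync_skcipher() may sleep, hence this kfunc is
+	 * marked KF_SLEEPABLE.
+	 */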
+	ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
+	if (IS_ERR(ctx->tfm)) {
+		*err = PTR_ERR(ctx->tfm);
+		ctx->tfm = NULL;
+		goto err;
+	}
+
+	*err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
+					   __bpf_dynptr_size(pkey));
+	if (*err)
+		goto err;
+
+	refcount_set(&ctx->usage, 1);
+
+	return ctx;
+err:
+	if (ctx->tfm)
+		crypto_free_sync_skcipher(ctx->tfm);
+	kfree(ctx);
+
+	return NULL;
+}
+
+static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
+{
+	struct bpf_crypto_skcipher_ctx *ctx;
+
+	ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
+	crypto_free_sync_skcipher(ctx->tfm);
+	kfree(ctx);
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
+ * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
+ *	     pointer.
+ *
+ * Acquires a reference to a BPF crypto context. The context returned by this function
+ * must either be embedded in a map as a kptr, or freed with
+ * bpf_crypto_skcipher_ctx_release().
+ */
+__bpf_kfunc struct bpf_crypto_skcipher_ctx *
+bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
+{
+	refcount_inc(&ctx->usage);
+	return ctx;
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
+ * @ctx: The crypto context being released.
+ *
+ * Releases a previously acquired reference to a BPF crypto context. When the
+ * final reference of the crypto context has been released, it is subsequently
+ * freed in an RCU callback.
+ */
+__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
+{
+	if (refcount_dec_and_test(&ctx->usage))
+		call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
+}
+
+static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
+				     const struct bpf_dynptr_kern *src,
+				     struct bpf_dynptr_kern *dst,
+				     const struct bpf_dynptr_kern *iv,
+				     bool decrypt)
+{
+	struct skcipher_request *req = NULL;
+	struct scatterlist sgin, sgout;
+	int err;
+
+	if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -EINVAL;
+
+	if (__bpf_dynptr_is_rdonly(dst))
+		return -EINVAL;
+
+	if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
+		return -EINVAL;
+
+	if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
+		return -EINVAL;
+
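+	/* GFP_ATOMIC: the crypt kfuncs run from non-sleepable TC/XDP programs. */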
+	req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
+	sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
+
+	skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
+				   __bpf_dynptr_data_ptr(iv));
+
+	err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
+
+	skcipher_request_free(req);
+
+	return err;
+}
+
+/**
+ * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
+ * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
+ * @src:	bpf_dynptr to the encrypted data. Must be a trusted pointer.
+ * @dst:	bpf_dynptr to the buffer where the result is stored. Must be a trusted pointer.
+ * @iv:		bpf_dynptr to the IV data to be used by the decryptor.
+ *
+ * Decrypts the provided buffer using the IV data and the crypto context. The crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
+					    const struct bpf_dynptr_kern *src,
+					    struct bpf_dynptr_kern *dst,
+					    const struct bpf_dynptr_kern *iv)
+{
+	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
+}
+
+/**
+ * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
+ * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
+ * @src:	bpf_dynptr to the plain data. Must be a trusted pointer.
+ * @dst:	bpf_dynptr to the buffer where the result is stored. Must be a trusted pointer.
+ * @iv:		bpf_dynptr to the IV data to be used by the encryptor.
+ *
+ * Encrypts the provided buffer using the IV data and the crypto context. The crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
+					    const struct bpf_dynptr_kern *src,
+					    struct bpf_dynptr_kern *dst,
+					    const struct bpf_dynptr_kern *iv)
+{
+	return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
+}
+
+__diag_pop();
+
+BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set   = &crypt_skcipher_init_kfunc_btf_ids,
+};
+
+BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
+BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set   = &crypt_skcipher_kfunc_btf_ids,
+};
+
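+/* Register the release kfunc as the destructor for ctx kptrs stored in
+ * maps, so the context is released when its map entry is destroyed.
+ */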
+BTF_ID_LIST(crypto_skcipher_dtor_ids)
+BTF_ID(struct, bpf_crypto_skcipher_ctx)
+BTF_ID(func, bpf_crypto_skcipher_ctx_release)
+
+static int __init crypto_skcipher_kfunc_init(void)
+{
+	int ret;
+	const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
+		{
+			.btf_id	      = crypto_skcipher_dtor_ids[0],
+			.kfunc_btf_id = crypto_skcipher_dtor_ids[1]
+		},
+	};
+
+	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
+					       &crypt_skcipher_init_kfunc_set);
+	return ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
+						   ARRAY_SIZE(crypto_skcipher_dtors),
+						   THIS_MODULE);
+}
+
+late_initcall(crypto_skcipher_kfunc_init);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 62a53ebfedf9..2020884fca3d 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1429,7 +1429,7 @@  static const struct bpf_func_proto bpf_kptr_xchg_proto = {
 #define DYNPTR_SIZE_MASK	0xFFFFFF
 #define DYNPTR_RDONLY_BIT	BIT(31)
 
-static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
 {
 	return ptr->size & DYNPTR_RDONLY_BIT;
 }
@@ -1444,7 +1444,7 @@  static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
 }
 
-static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
+enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
 {
 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
 }
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb58987e4844..75d2f47ca3cb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5184,6 +5184,7 @@  BTF_ID(struct, prog_test_ref_kfunc)
 BTF_ID(struct, cgroup)
 BTF_ID(struct, bpf_cpumask)
 BTF_ID(struct, task_struct)
+BTF_ID(struct, bpf_crypto_skcipher_ctx)
 BTF_SET_END(rcu_protected_types)
 
 static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
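
For completeness, a hedged sketch of the intended BPF-side usage, loosely
modelled on the selftest in patch 2/2 of this series: the context is created
from a sleepable ("syscall") program and stashed in a map as a kptr, then
used from TC. The map layout, section names and the "ecb(aes)" choice are
illustrative assumptions, and vmlinux.h is assumed to be generated from a
kernel carrying this patch. The direct v->ctx read in the TC program relies
on bpf_crypto_skcipher_ctx being added to rcu_protected_types above.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK 0

extern struct bpf_crypto_skcipher_ctx *
bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr *palgo,
			       const struct bpf_dynptr *pkey, int *err) __ksym;
extern void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx) __ksym;
extern int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
				       const struct bpf_dynptr *src,
				       struct bpf_dynptr *dst,
				       const struct bpf_dynptr *iv) __ksym;
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;

struct ctx_value {
	struct bpf_crypto_skcipher_ctx __kptr *ctx;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct ctx_value);
} crypto_ctx_map SEC(".maps");

char cipher[] = "ecb(aes)";
__u8 cipher_key[16];

SEC("syscall") /* sleepable, since ctx_create() may sleep */
int create_ctx(void *args)
{
	struct bpf_crypto_skcipher_ctx *cctx, *old;
	struct bpf_dynptr palgo, pkey;
	struct ctx_value *v;
	int err = 0, zero = 0;

	bpf_dynptr_from_mem(cipher, sizeof(cipher), 0, &palgo);
	bpf_dynptr_from_mem(cipher_key, sizeof(cipher_key), 0, &pkey);

	cctx = bpf_crypto_skcipher_ctx_create(&palgo, &pkey, &err);
	if (!cctx)
		return err;

	v = bpf_map_lookup_elem(&crypto_ctx_map, &zero);
	if (!v) {
		bpf_crypto_skcipher_ctx_release(cctx);
		return -1;
	}
	/* Publish the context; release any previously stored one. */
	old = bpf_kptr_xchg(&v->ctx, cctx);
	if (old)
		bpf_crypto_skcipher_ctx_release(old);
	return 0;
}

SEC("tc")
int decrypt_pkt(struct __sk_buff *skb)
{
	struct bpf_crypto_skcipher_ctx *cctx;
	struct bpf_dynptr pbuf, piv;
	struct ctx_value *v;
	int zero = 0;

	v = bpf_map_lookup_elem(&crypto_ctx_map, &zero);
	if (!v)
		return TC_ACT_OK;
	cctx = v->ctx;
	if (!cctx)
		return TC_ACT_OK;

	if (bpf_dynptr_from_skb(skb, 0, &pbuf))
		return TC_ACT_OK;
	/* ecb(aes) has a zero-sized IV, so pass an empty dynptr. */
	bpf_dynptr_from_mem(cipher_key, 0, 0, &piv);

	/* Decrypt in place from the start of the linear data; a real
	 * program would carve out the payload past the headers first.
	 */
	bpf_crypto_skcipher_decrypt(cctx, &pbuf, &pbuf, &piv);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

The split between the two programs mirrors the KF_SLEEPABLE/KF_RCU flag
split in the kfunc sets: allocation happens where sleeping is allowed,
while the per-packet crypt calls only need the implicit RCU protection of
non-sleepable TC/XDP execution.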