diff mbox series

[bpf-next,v3,04/10] bpf: lsm: Add mutable hooks list for the BPF LSM

Message ID 20200123152440.28956-5-kpsingh@chromium.org (mailing list archive)
State New, archived
Headers show
Series MAC and Audit policy using eBPF (KRSI) | expand

Commit Message

KP Singh Jan. 23, 2020, 3:24 p.m. UTC
From: KP Singh <kpsingh@google.com>

- The list of hooks registered by an LSM is currently immutable as they
  are declared with __lsm_ro_after_init and they are attached to a
  security_hook_heads struct.
- For the BPF LSM we need to de/register the hooks at runtime. Making
  the existing security_hook_heads mutable broadens an
  attack vector, so a separate security_hook_heads is added for only
  those that ~must~ be mutable.
- These mutable hooks are run only after all the static hooks have
  successfully executed.

This is based on the ideas discussed in:

  https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal

Reviewed-by: Brendan Jackman <jackmanb@google.com>
Reviewed-by: Florent Revest <revest@google.com>
Reviewed-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: KP Singh <kpsingh@google.com>
---
 MAINTAINERS             |  1 +
 include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
 security/bpf/Kconfig    |  1 +
 security/bpf/Makefile   |  2 +-
 security/bpf/hooks.c    | 20 ++++++++++++
 security/bpf/lsm.c      |  7 ++++
 security/security.c     | 25 +++++++-------
 7 files changed, 116 insertions(+), 12 deletions(-)
 create mode 100644 include/linux/bpf_lsm.h
 create mode 100644 security/bpf/hooks.c

Comments

Casey Schaufler Jan. 23, 2020, 5:03 p.m. UTC | #1
On 1/23/2020 7:24 AM, KP Singh wrote:
> From: KP Singh <kpsingh@google.com>
>
> - The list of hooks registered by an LSM is currently immutable as they
>   are declared with __lsm_ro_after_init and they are attached to a
>   security_hook_heads struct.
> - For the BPF LSM we need to de/register the hooks at runtime. Making
>   the existing security_hook_heads mutable broadens an
>   attack vector, so a separate security_hook_heads is added for only
>   those that ~must~ be mutable.
> - These mutable hooks are run only after all the static hooks have
>   successfully executed.
>
> This is based on the ideas discussed in:
>
>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
>
> Reviewed-by: Brendan Jackman <jackmanb@google.com>
> Reviewed-by: Florent Revest <revest@google.com>
> Reviewed-by: Thomas Garnier <thgarnie@google.com>
> Signed-off-by: KP Singh <kpsingh@google.com>
> ---
>  MAINTAINERS             |  1 +
>  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
>  security/bpf/Kconfig    |  1 +
>  security/bpf/Makefile   |  2 +-
>  security/bpf/hooks.c    | 20 ++++++++++++
>  security/bpf/lsm.c      |  7 ++++
>  security/security.c     | 25 +++++++-------
>  7 files changed, 116 insertions(+), 12 deletions(-)
>  create mode 100644 include/linux/bpf_lsm.h
>  create mode 100644 security/bpf/hooks.c
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index e2b7f76a1a70..c606b3d89992 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
>  L:	bpf@vger.kernel.org
>  S:	Maintained
>  F:	security/bpf/
> +F:	include/linux/bpf_lsm.h
>  
>  BROADCOM B44 10/100 ETHERNET DRIVER
>  M:	Michael Chan <michael.chan@broadcom.com>
> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
> new file mode 100644
> index 000000000000..57c20b2cd2f4
> --- /dev/null
> +++ b/include/linux/bpf_lsm.h
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +/*
> + * Copyright 2019 Google LLC.
> + */
> +
> +#ifndef _LINUX_BPF_LSM_H
> +#define _LINUX_BPF_LSM_H
> +
> +#include <linux/bpf.h>
> +#include <linux/lsm_hooks.h>
> +
> +#ifdef CONFIG_SECURITY_BPF
> +
> +/* Mutable hooks defined at runtime and executed after all the statically
> + * defined LSM hooks.
> + */
> +extern struct security_hook_heads bpf_lsm_hook_heads;
> +
> +int bpf_lsm_srcu_read_lock(void);
> +void bpf_lsm_srcu_read_unlock(int idx);
> +
> +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
> +	do {							\
> +		struct security_hook_list *P;			\
> +		int _idx;					\
> +								\
> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> +			break;					\
> +								\
> +		_idx = bpf_lsm_srcu_read_lock();		\
> +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
> +			P->hook.FUNC(__VA_ARGS__);		\
> +		bpf_lsm_srcu_read_unlock(_idx);			\
> +	} while (0)
> +
> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> +	int _ret = 0;						\
> +	do {							\
> +		struct security_hook_list *P;			\
> +		int _idx;					\
> +								\
> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> +			break;					\
> +								\
> +		_idx = bpf_lsm_srcu_read_lock();		\
> +								\
> +		hlist_for_each_entry(P,				\
> +			&bpf_lsm_hook_heads.FUNC, list) {	\
> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> +				break;				\
> +		}						\
> +		bpf_lsm_srcu_read_unlock(_idx);			\
> +	} while (0);						\
> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> +})
> +
> +#else /* !CONFIG_SECURITY_BPF */
> +
> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
> +#define CALL_BPF_LSM_VOID_HOOKS(...)
> +
> +static inline int bpf_lsm_srcu_read_lock(void)
> +{
> +	return 0;
> +}
> +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
> +
> +#endif /* CONFIG_SECURITY_BPF */
> +
> +#endif /* _LINUX_BPF_LSM_H */
> diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
> index a5f6c67ae526..595e4ad597ae 100644
> --- a/security/bpf/Kconfig
> +++ b/security/bpf/Kconfig
> @@ -6,6 +6,7 @@ config SECURITY_BPF
>  	bool "BPF-based MAC and audit policy"
>  	depends on SECURITY
>  	depends on BPF_SYSCALL
> +	depends on SRCU
>  	help
>  	  This enables instrumentation of the security hooks with
>  	  eBPF programs.
> diff --git a/security/bpf/Makefile b/security/bpf/Makefile
> index c78a8a056e7e..c526927c337d 100644
> --- a/security/bpf/Makefile
> +++ b/security/bpf/Makefile
> @@ -2,4 +2,4 @@
>  #
>  # Copyright 2019 Google LLC.
>  
> -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
> +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
> diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
> new file mode 100644
> index 000000000000..b123d9cb4cd4
> --- /dev/null
> +++ b/security/bpf/hooks.c
> @@ -0,0 +1,20 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * Copyright 2019 Google LLC.
> + */
> +
> +#include <linux/bpf_lsm.h>
> +#include <linux/srcu.h>
> +
> +DEFINE_STATIC_SRCU(security_hook_srcu);
> +
> +int bpf_lsm_srcu_read_lock(void)
> +{
> +	return srcu_read_lock(&security_hook_srcu);
> +}
> +
> +void bpf_lsm_srcu_read_unlock(int idx)
> +{
> +	return srcu_read_unlock(&security_hook_srcu, idx);
> +}
> diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
> index dc9ac03c7aa0..a25a068e1781 100644
> --- a/security/bpf/lsm.c
> +++ b/security/bpf/lsm.c
> @@ -4,6 +4,7 @@
>   * Copyright 2019 Google LLC.
>   */
>  
> +#include <linux/bpf_lsm.h>
>  #include <linux/lsm_hooks.h>
>  
>  /* This is only for internal hooks, always statically shipped as part of the
> @@ -12,6 +13,12 @@
>   */
>  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
>  
> +/* Security hooks registered dynamically by the BPF LSM and must be accessed
> + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
> + * hooks dynamically allocated by the BPF LSM are appended here.
> + */
> +struct security_hook_heads bpf_lsm_hook_heads;
> +
>  static int __init bpf_lsm_init(void)
>  {
>  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
> diff --git a/security/security.c b/security/security.c
> index 30a8aa700557..95a46ca25dcd 100644
> --- a/security/security.c
> +++ b/security/security.c
> @@ -27,6 +27,7 @@
>  #include <linux/backing-dev.h>
>  #include <linux/string.h>
>  #include <linux/msg.h>
> +#include <linux/bpf_lsm.h>
>  #include <net/flow.h>
>  
>  #define MAX_LSM_EVM_XATTR	2
> @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
>  								\
>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
>  			P->hook.FUNC(__VA_ARGS__);		\
> +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\

I'm sorry if I wasn't clear on the v2 review.
This does not belong in the infrastructure. You should be
doing all the bpf_lsm hook processing in your module.
bpf_lsm_task_alloc() should loop through all the bpf
task_alloc hooks if they have to be handled differently
from "normal" LSM hooks.

>  	} while (0)
>  
> -#define call_int_hook(FUNC, IRC, ...) ({			\
> -	int RC = IRC;						\
> -	do {							\
> -		struct security_hook_list *P;			\
> -								\
> +#define call_int_hook(FUNC, IRC, ...) ({				\
> +	int RC = IRC;							\
> +	do {								\
> +		struct security_hook_list *P;				\
>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
> -			RC = P->hook.FUNC(__VA_ARGS__);		\
> -			if (RC != 0)				\
> -				break;				\
> -		}						\
> -	} while (0);						\
> -	RC;							\
> +			RC = P->hook.FUNC(__VA_ARGS__);			\
> +			if (RC != 0)					\
> +				break;					\
> +		}							\
> +		if (RC == 0)						\
> +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
> +	} while (0);							\
> +	RC;								\
>  })
>  
>  /* Security operations */
KP Singh Jan. 23, 2020, 5:59 p.m. UTC | #2
On 23-Jan 09:03, Casey Schaufler wrote:
> On 1/23/2020 7:24 AM, KP Singh wrote:
> > From: KP Singh <kpsingh@google.com>
> >
> > - The list of hooks registered by an LSM is currently immutable as they
> >   are declared with __lsm_ro_after_init and they are attached to a
> >   security_hook_heads struct.
> > - For the BPF LSM we need to de/register the hooks at runtime. Making
> >   the existing security_hook_heads mutable broadens an
> >   attack vector, so a separate security_hook_heads is added for only
> >   those that ~must~ be mutable.
> > - These mutable hooks are run only after all the static hooks have
> >   successfully executed.
> >
> > This is based on the ideas discussed in:
> >
> >   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
> >
> > Reviewed-by: Brendan Jackman <jackmanb@google.com>
> > Reviewed-by: Florent Revest <revest@google.com>
> > Reviewed-by: Thomas Garnier <thgarnie@google.com>
> > Signed-off-by: KP Singh <kpsingh@google.com>
> > ---
> >  MAINTAINERS             |  1 +
> >  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
> >  security/bpf/Kconfig    |  1 +
> >  security/bpf/Makefile   |  2 +-
> >  security/bpf/hooks.c    | 20 ++++++++++++
> >  security/bpf/lsm.c      |  7 ++++
> >  security/security.c     | 25 +++++++-------
> >  7 files changed, 116 insertions(+), 12 deletions(-)
> >  create mode 100644 include/linux/bpf_lsm.h
> >  create mode 100644 security/bpf/hooks.c
> >
> > diff --git a/MAINTAINERS b/MAINTAINERS
> > index e2b7f76a1a70..c606b3d89992 100644
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
> >  L:	bpf@vger.kernel.org
> >  S:	Maintained
> >  F:	security/bpf/
> > +F:	include/linux/bpf_lsm.h
> >  
> >  BROADCOM B44 10/100 ETHERNET DRIVER
> >  M:	Michael Chan <michael.chan@broadcom.com>
> > diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
> > new file mode 100644
> > index 000000000000..57c20b2cd2f4
> > --- /dev/null
> > +++ b/include/linux/bpf_lsm.h
> > @@ -0,0 +1,72 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +
> > +/*
> > + * Copyright 2019 Google LLC.
> > + */
> > +
> > +#ifndef _LINUX_BPF_LSM_H
> > +#define _LINUX_BPF_LSM_H
> > +
> > +#include <linux/bpf.h>
> > +#include <linux/lsm_hooks.h>
> > +
> > +#ifdef CONFIG_SECURITY_BPF
> > +
> > +/* Mutable hooks defined at runtime and executed after all the statically
> > + * defined LSM hooks.
> > + */
> > +extern struct security_hook_heads bpf_lsm_hook_heads;
> > +
> > +int bpf_lsm_srcu_read_lock(void);
> > +void bpf_lsm_srcu_read_unlock(int idx);
> > +
> > +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
> > +	do {							\
> > +		struct security_hook_list *P;			\
> > +		int _idx;					\
> > +								\
> > +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> > +			break;					\
> > +								\
> > +		_idx = bpf_lsm_srcu_read_lock();		\
> > +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
> > +			P->hook.FUNC(__VA_ARGS__);		\
> > +		bpf_lsm_srcu_read_unlock(_idx);			\
> > +	} while (0)
> > +
> > +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> > +	int _ret = 0;						\
> > +	do {							\
> > +		struct security_hook_list *P;			\
> > +		int _idx;					\
> > +								\
> > +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> > +			break;					\
> > +								\
> > +		_idx = bpf_lsm_srcu_read_lock();		\
> > +								\
> > +		hlist_for_each_entry(P,				\
> > +			&bpf_lsm_hook_heads.FUNC, list) {	\
> > +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> > +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> > +				break;				\
> > +		}						\
> > +		bpf_lsm_srcu_read_unlock(_idx);			\
> > +	} while (0);						\
> > +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> > +})
> > +
> > +#else /* !CONFIG_SECURITY_BPF */
> > +
> > +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
> > +#define CALL_BPF_LSM_VOID_HOOKS(...)
> > +
> > +static inline int bpf_lsm_srcu_read_lock(void)
> > +{
> > +	return 0;
> > +}
> > +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
> > +
> > +#endif /* CONFIG_SECURITY_BPF */
> > +
> > +#endif /* _LINUX_BPF_LSM_H */
> > diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
> > index a5f6c67ae526..595e4ad597ae 100644
> > --- a/security/bpf/Kconfig
> > +++ b/security/bpf/Kconfig
> > @@ -6,6 +6,7 @@ config SECURITY_BPF
> >  	bool "BPF-based MAC and audit policy"
> >  	depends on SECURITY
> >  	depends on BPF_SYSCALL
> > +	depends on SRCU
> >  	help
> >  	  This enables instrumentation of the security hooks with
> >  	  eBPF programs.
> > diff --git a/security/bpf/Makefile b/security/bpf/Makefile
> > index c78a8a056e7e..c526927c337d 100644
> > --- a/security/bpf/Makefile
> > +++ b/security/bpf/Makefile
> > @@ -2,4 +2,4 @@
> >  #
> >  # Copyright 2019 Google LLC.
> >  
> > -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
> > +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
> > diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
> > new file mode 100644
> > index 000000000000..b123d9cb4cd4
> > --- /dev/null
> > +++ b/security/bpf/hooks.c
> > @@ -0,0 +1,20 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +/*
> > + * Copyright 2019 Google LLC.
> > + */
> > +
> > +#include <linux/bpf_lsm.h>
> > +#include <linux/srcu.h>
> > +
> > +DEFINE_STATIC_SRCU(security_hook_srcu);
> > +
> > +int bpf_lsm_srcu_read_lock(void)
> > +{
> > +	return srcu_read_lock(&security_hook_srcu);
> > +}
> > +
> > +void bpf_lsm_srcu_read_unlock(int idx)
> > +{
> > +	return srcu_read_unlock(&security_hook_srcu, idx);
> > +}
> > diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
> > index dc9ac03c7aa0..a25a068e1781 100644
> > --- a/security/bpf/lsm.c
> > +++ b/security/bpf/lsm.c
> > @@ -4,6 +4,7 @@
> >   * Copyright 2019 Google LLC.
> >   */
> >  
> > +#include <linux/bpf_lsm.h>
> >  #include <linux/lsm_hooks.h>
> >  
> >  /* This is only for internal hooks, always statically shipped as part of the
> > @@ -12,6 +13,12 @@
> >   */
> >  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
> >  
> > +/* Security hooks registered dynamically by the BPF LSM and must be accessed
> > + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
> > + * hooks dynamically allocated by the BPF LSM are appended here.
> > + */
> > +struct security_hook_heads bpf_lsm_hook_heads;
> > +
> >  static int __init bpf_lsm_init(void)
> >  {
> >  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
> > diff --git a/security/security.c b/security/security.c
> > index 30a8aa700557..95a46ca25dcd 100644
> > --- a/security/security.c
> > +++ b/security/security.c
> > @@ -27,6 +27,7 @@
> >  #include <linux/backing-dev.h>
> >  #include <linux/string.h>
> >  #include <linux/msg.h>
> > +#include <linux/bpf_lsm.h>
> >  #include <net/flow.h>
> >  
> >  #define MAX_LSM_EVM_XATTR	2
> > @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
> >  								\
> >  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
> >  			P->hook.FUNC(__VA_ARGS__);		\
> > +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
> 
> I'm sorry if I wasn't clear on the v2 review.
> This does not belong in the infrastructure. You should be
> doing all the bpf_lsm hook processing in your module.
> bpf_lsm_task_alloc() should loop through all the bpf
> task_alloc hooks if they have to be handled differently
> from "normal" LSM hooks.

The BPF LSM does not define static hooks (the ones registered to
security_hook_heads in security.c with __lsm_ro_after_init) for each
LSM hook. If it tries to do that one ends with what was in v1:

  https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org

This gets quite ugly (security/bpf/hooks.h from v1) and was noted by
the BPF maintainers:

  https://lore.kernel.org/bpf/20191222012722.gdqhppxpfmqfqbld@ast-mbp.dhcp.thefacebook.com/

As I mentioned, some of the ideas we used here are based on:

  https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal

Which gave each LSM the ability to add mutable hooks at runtime. If
you prefer we can make this generic and allow the LSMs to register
mutable hooks with the BPF LSM be the only LSM that uses it (and
enforce it with a whitelist).

Would this generic approach be something you would consider better
than just calling the BPF mutable hooks directly?

- KP

> 
> >  	} while (0)
> >  
> > -#define call_int_hook(FUNC, IRC, ...) ({			\
> > -	int RC = IRC;						\
> > -	do {							\
> > -		struct security_hook_list *P;			\
> > -								\
> > +#define call_int_hook(FUNC, IRC, ...) ({				\
> > +	int RC = IRC;							\
> > +	do {								\
> > +		struct security_hook_list *P;				\
> >  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
> > -			RC = P->hook.FUNC(__VA_ARGS__);		\
> > -			if (RC != 0)				\
> > -				break;				\
> > -		}						\
> > -	} while (0);						\
> > -	RC;							\
> > +			RC = P->hook.FUNC(__VA_ARGS__);			\
> > +			if (RC != 0)					\
> > +				break;					\
> > +		}							\
> > +		if (RC == 0)						\
> > +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
> > +	} while (0);							\
> > +	RC;								\
> >  })
> >  
> >  /* Security operations */
Casey Schaufler Jan. 23, 2020, 7:09 p.m. UTC | #3
On 1/23/2020 9:59 AM, KP Singh wrote:
> On 23-Jan 09:03, Casey Schaufler wrote:
>> On 1/23/2020 7:24 AM, KP Singh wrote:
>>> From: KP Singh <kpsingh@google.com>
>>>
>>> - The list of hooks registered by an LSM is currently immutable as they
>>>   are declared with __lsm_ro_after_init and they are attached to a
>>>   security_hook_heads struct.
>>> - For the BPF LSM we need to de/register the hooks at runtime. Making
>>>   the existing security_hook_heads mutable broadens an
>>>   attack vector, so a separate security_hook_heads is added for only
>>>   those that ~must~ be mutable.
>>> - These mutable hooks are run only after all the static hooks have
>>>   successfully executed.
>>>
>>> This is based on the ideas discussed in:
>>>
>>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
>>>
>>> Reviewed-by: Brendan Jackman <jackmanb@google.com>
>>> Reviewed-by: Florent Revest <revest@google.com>
>>> Reviewed-by: Thomas Garnier <thgarnie@google.com>
>>> Signed-off-by: KP Singh <kpsingh@google.com>
>>> ---
>>>  MAINTAINERS             |  1 +
>>>  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
>>>  security/bpf/Kconfig    |  1 +
>>>  security/bpf/Makefile   |  2 +-
>>>  security/bpf/hooks.c    | 20 ++++++++++++
>>>  security/bpf/lsm.c      |  7 ++++
>>>  security/security.c     | 25 +++++++-------
>>>  7 files changed, 116 insertions(+), 12 deletions(-)
>>>  create mode 100644 include/linux/bpf_lsm.h
>>>  create mode 100644 security/bpf/hooks.c
>>>
>>> diff --git a/MAINTAINERS b/MAINTAINERS
>>> index e2b7f76a1a70..c606b3d89992 100644
>>> --- a/MAINTAINERS
>>> +++ b/MAINTAINERS
>>> @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
>>>  L:	bpf@vger.kernel.org
>>>  S:	Maintained
>>>  F:	security/bpf/
>>> +F:	include/linux/bpf_lsm.h
>>>  
>>>  BROADCOM B44 10/100 ETHERNET DRIVER
>>>  M:	Michael Chan <michael.chan@broadcom.com>
>>> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
>>> new file mode 100644
>>> index 000000000000..57c20b2cd2f4
>>> --- /dev/null
>>> +++ b/include/linux/bpf_lsm.h
>>> @@ -0,0 +1,72 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>> +
>>> +/*
>>> + * Copyright 2019 Google LLC.
>>> + */
>>> +
>>> +#ifndef _LINUX_BPF_LSM_H
>>> +#define _LINUX_BPF_LSM_H
>>> +
>>> +#include <linux/bpf.h>
>>> +#include <linux/lsm_hooks.h>
>>> +
>>> +#ifdef CONFIG_SECURITY_BPF
>>> +
>>> +/* Mutable hooks defined at runtime and executed after all the statically
>>> + * defined LSM hooks.
>>> + */
>>> +extern struct security_hook_heads bpf_lsm_hook_heads;
>>> +
>>> +int bpf_lsm_srcu_read_lock(void);
>>> +void bpf_lsm_srcu_read_unlock(int idx);
>>> +
>>> +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
>>> +	do {							\
>>> +		struct security_hook_list *P;			\
>>> +		int _idx;					\
>>> +								\
>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
>>> +			break;					\
>>> +								\
>>> +		_idx = bpf_lsm_srcu_read_lock();		\
>>> +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
>>> +			P->hook.FUNC(__VA_ARGS__);		\
>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
>>> +	} while (0)
>>> +
>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
>>> +	int _ret = 0;						\
>>> +	do {							\
>>> +		struct security_hook_list *P;			\
>>> +		int _idx;					\
>>> +								\
>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
>>> +			break;					\
>>> +								\
>>> +		_idx = bpf_lsm_srcu_read_lock();		\
>>> +								\
>>> +		hlist_for_each_entry(P,				\
>>> +			&bpf_lsm_hook_heads.FUNC, list) {	\
>>> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
>>> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
>>> +				break;				\
>>> +		}						\
>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
>>> +	} while (0);						\
>>> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
>>> +})
>>> +
>>> +#else /* !CONFIG_SECURITY_BPF */
>>> +
>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
>>> +#define CALL_BPF_LSM_VOID_HOOKS(...)
>>> +
>>> +static inline int bpf_lsm_srcu_read_lock(void)
>>> +{
>>> +	return 0;
>>> +}
>>> +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
>>> +
>>> +#endif /* CONFIG_SECURITY_BPF */
>>> +
>>> +#endif /* _LINUX_BPF_LSM_H */
>>> diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
>>> index a5f6c67ae526..595e4ad597ae 100644
>>> --- a/security/bpf/Kconfig
>>> +++ b/security/bpf/Kconfig
>>> @@ -6,6 +6,7 @@ config SECURITY_BPF
>>>  	bool "BPF-based MAC and audit policy"
>>>  	depends on SECURITY
>>>  	depends on BPF_SYSCALL
>>> +	depends on SRCU
>>>  	help
>>>  	  This enables instrumentation of the security hooks with
>>>  	  eBPF programs.
>>> diff --git a/security/bpf/Makefile b/security/bpf/Makefile
>>> index c78a8a056e7e..c526927c337d 100644
>>> --- a/security/bpf/Makefile
>>> +++ b/security/bpf/Makefile
>>> @@ -2,4 +2,4 @@
>>>  #
>>>  # Copyright 2019 Google LLC.
>>>  
>>> -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
>>> +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
>>> diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
>>> new file mode 100644
>>> index 000000000000..b123d9cb4cd4
>>> --- /dev/null
>>> +++ b/security/bpf/hooks.c
>>> @@ -0,0 +1,20 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +
>>> +/*
>>> + * Copyright 2019 Google LLC.
>>> + */
>>> +
>>> +#include <linux/bpf_lsm.h>
>>> +#include <linux/srcu.h>
>>> +
>>> +DEFINE_STATIC_SRCU(security_hook_srcu);
>>> +
>>> +int bpf_lsm_srcu_read_lock(void)
>>> +{
>>> +	return srcu_read_lock(&security_hook_srcu);
>>> +}
>>> +
>>> +void bpf_lsm_srcu_read_unlock(int idx)
>>> +{
>>> +	return srcu_read_unlock(&security_hook_srcu, idx);
>>> +}
>>> diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
>>> index dc9ac03c7aa0..a25a068e1781 100644
>>> --- a/security/bpf/lsm.c
>>> +++ b/security/bpf/lsm.c
>>> @@ -4,6 +4,7 @@
>>>   * Copyright 2019 Google LLC.
>>>   */
>>>  
>>> +#include <linux/bpf_lsm.h>
>>>  #include <linux/lsm_hooks.h>
>>>  
>>>  /* This is only for internal hooks, always statically shipped as part of the
>>> @@ -12,6 +13,12 @@
>>>   */
>>>  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
>>>  
>>> +/* Security hooks registered dynamically by the BPF LSM and must be accessed
>>> + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
>>> + * hooks dynamically allocated by the BPF LSM are appended here.
>>> + */
>>> +struct security_hook_heads bpf_lsm_hook_heads;
>>> +
>>>  static int __init bpf_lsm_init(void)
>>>  {
>>>  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
>>> diff --git a/security/security.c b/security/security.c
>>> index 30a8aa700557..95a46ca25dcd 100644
>>> --- a/security/security.c
>>> +++ b/security/security.c
>>> @@ -27,6 +27,7 @@
>>>  #include <linux/backing-dev.h>
>>>  #include <linux/string.h>
>>>  #include <linux/msg.h>
>>> +#include <linux/bpf_lsm.h>
>>>  #include <net/flow.h>
>>>  
>>>  #define MAX_LSM_EVM_XATTR	2
>>> @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
>>>  								\
>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
>>>  			P->hook.FUNC(__VA_ARGS__);		\
>>> +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
>> I'm sorry if I wasn't clear on the v2 review.
>> This does not belong in the infrastructure. You should be
>> doing all the bpf_lsm hook processing in your module.
>> bpf_lsm_task_alloc() should loop through all the bpf
>> task_alloc hooks if they have to be handled differently
>> from "normal" LSM hooks.
> The BPF LSM does not define static hooks (the ones registered to
> security_hook_heads in security.c with __lsm_ro_after_init) for each
> LSM hook. If it tries to do that one ends with what was in v1:
>
>   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org
>
> This gets quite ugly (security/bpf/hooks.h from v1) and was noted by
> the BPF maintainers:
>
>   https://lore.kernel.org/bpf/20191222012722.gdqhppxpfmqfqbld@ast-mbp.dhcp.thefacebook.com/
>
> As I mentioned, some of the ideas we used here are based on:
>
>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
>
> Which gave each LSM the ability to add mutable hooks at runtime. If
> you prefer we can make this generic and allow the LSMs to register
> mutable hooks with the BPF LSM be the only LSM that uses it (and
> enforce it with a whitelist).
>
> Would this generic approach be something you would consider better
> than just calling the BPF mutable hooks directly?

What I think makes sense is for the BPF LSM to have a hook
for each of the interfaces and for that hook to handle the
mutable list for the interface. If BPF is not included,
there will be no mutable hooks.

Yes, your v1 got this right.

>
> - KP
>
>>>  	} while (0)
>>>  
>>> -#define call_int_hook(FUNC, IRC, ...) ({			\
>>> -	int RC = IRC;						\
>>> -	do {							\
>>> -		struct security_hook_list *P;			\
>>> -								\
>>> +#define call_int_hook(FUNC, IRC, ...) ({				\
>>> +	int RC = IRC;							\
>>> +	do {								\
>>> +		struct security_hook_list *P;				\
>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
>>> -			RC = P->hook.FUNC(__VA_ARGS__);		\
>>> -			if (RC != 0)				\
>>> -				break;				\
>>> -		}						\
>>> -	} while (0);						\
>>> -	RC;							\
>>> +			RC = P->hook.FUNC(__VA_ARGS__);			\
>>> +			if (RC != 0)					\
>>> +				break;					\
>>> +		}							\
>>> +		if (RC == 0)						\
>>> +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
>>> +	} while (0);							\
>>> +	RC;								\
>>>  })
>>>  
>>>  /* Security operations */
KP Singh Jan. 23, 2020, 10:24 p.m. UTC | #4
On 23-Jan 11:09, Casey Schaufler wrote:
> On 1/23/2020 9:59 AM, KP Singh wrote:
> > On 23-Jan 09:03, Casey Schaufler wrote:
> >> On 1/23/2020 7:24 AM, KP Singh wrote:
> >>> From: KP Singh <kpsingh@google.com>
> >>>
> >>> - The list of hooks registered by an LSM is currently immutable as they
> >>>   are declared with __lsm_ro_after_init and they are attached to a
> >>>   security_hook_heads struct.
> >>> - For the BPF LSM we need to de/register the hooks at runtime. Making
> >>>   the existing security_hook_heads mutable broadens an
> >>>   attack vector, so a separate security_hook_heads is added for only
> >>>   those that ~must~ be mutable.
> >>> - These mutable hooks are run only after all the static hooks have
> >>>   successfully executed.
> >>>
> >>> This is based on the ideas discussed in:
> >>>
> >>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
> >>>
> >>> Reviewed-by: Brendan Jackman <jackmanb@google.com>
> >>> Reviewed-by: Florent Revest <revest@google.com>
> >>> Reviewed-by: Thomas Garnier <thgarnie@google.com>
> >>> Signed-off-by: KP Singh <kpsingh@google.com>
> >>> ---
> >>>  MAINTAINERS             |  1 +
> >>>  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
> >>>  security/bpf/Kconfig    |  1 +
> >>>  security/bpf/Makefile   |  2 +-
> >>>  security/bpf/hooks.c    | 20 ++++++++++++
> >>>  security/bpf/lsm.c      |  7 ++++
> >>>  security/security.c     | 25 +++++++-------
> >>>  7 files changed, 116 insertions(+), 12 deletions(-)
> >>>  create mode 100644 include/linux/bpf_lsm.h
> >>>  create mode 100644 security/bpf/hooks.c
> >>>
> >>> diff --git a/MAINTAINERS b/MAINTAINERS
> >>> index e2b7f76a1a70..c606b3d89992 100644
> >>> --- a/MAINTAINERS
> >>> +++ b/MAINTAINERS
> >>> @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
> >>>  L:	bpf@vger.kernel.org
> >>>  S:	Maintained
> >>>  F:	security/bpf/
> >>> +F:	include/linux/bpf_lsm.h
> >>>  
> >>>  BROADCOM B44 10/100 ETHERNET DRIVER
> >>>  M:	Michael Chan <michael.chan@broadcom.com>
> >>> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
> >>> new file mode 100644
> >>> index 000000000000..57c20b2cd2f4
> >>> --- /dev/null
> >>> +++ b/include/linux/bpf_lsm.h
> >>> @@ -0,0 +1,72 @@
> >>> +/* SPDX-License-Identifier: GPL-2.0 */
> >>> +
> >>> +/*
> >>> + * Copyright 2019 Google LLC.
> >>> + */
> >>> +
> >>> +#ifndef _LINUX_BPF_LSM_H
> >>> +#define _LINUX_BPF_LSM_H
> >>> +
> >>> +#include <linux/bpf.h>
> >>> +#include <linux/lsm_hooks.h>
> >>> +
> >>> +#ifdef CONFIG_SECURITY_BPF
> >>> +
> >>> +/* Mutable hooks defined at runtime and executed after all the statically
> >>> + * defined LSM hooks.
> >>> + */
> >>> +extern struct security_hook_heads bpf_lsm_hook_heads;
> >>> +
> >>> +int bpf_lsm_srcu_read_lock(void);
> >>> +void bpf_lsm_srcu_read_unlock(int idx);
> >>> +
> >>> +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
> >>> +	do {							\
> >>> +		struct security_hook_list *P;			\
> >>> +		int _idx;					\
> >>> +								\
> >>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> >>> +			break;					\
> >>> +								\
> >>> +		_idx = bpf_lsm_srcu_read_lock();		\
> >>> +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
> >>> +			P->hook.FUNC(__VA_ARGS__);		\
> >>> +		bpf_lsm_srcu_read_unlock(_idx);			\
> >>> +	} while (0)
> >>> +
> >>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> >>> +	int _ret = 0;						\
> >>> +	do {							\
> >>> +		struct security_hook_list *P;			\
> >>> +		int _idx;					\
> >>> +								\
> >>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> >>> +			break;					\
> >>> +								\
> >>> +		_idx = bpf_lsm_srcu_read_lock();		\
> >>> +								\
> >>> +		hlist_for_each_entry(P,				\
> >>> +			&bpf_lsm_hook_heads.FUNC, list) {	\
> >>> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> >>> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> >>> +				break;				\
> >>> +		}						\
> >>> +		bpf_lsm_srcu_read_unlock(_idx);			\
> >>> +	} while (0);						\
> >>> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> >>> +})
> >>> +
> >>> +#else /* !CONFIG_SECURITY_BPF */
> >>> +
> >>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
> >>> +#define CALL_BPF_LSM_VOID_HOOKS(...)
> >>> +
> >>> +static inline int bpf_lsm_srcu_read_lock(void)
> >>> +{
> >>> +	return 0;
> >>> +}
> >>> +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
> >>> +
> >>> +#endif /* CONFIG_SECURITY_BPF */
> >>> +
> >>> +#endif /* _LINUX_BPF_LSM_H */
> >>> diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
> >>> index a5f6c67ae526..595e4ad597ae 100644
> >>> --- a/security/bpf/Kconfig
> >>> +++ b/security/bpf/Kconfig
> >>> @@ -6,6 +6,7 @@ config SECURITY_BPF
> >>>  	bool "BPF-based MAC and audit policy"
> >>>  	depends on SECURITY
> >>>  	depends on BPF_SYSCALL
> >>> +	depends on SRCU
> >>>  	help
> >>>  	  This enables instrumentation of the security hooks with
> >>>  	  eBPF programs.
> >>> diff --git a/security/bpf/Makefile b/security/bpf/Makefile
> >>> index c78a8a056e7e..c526927c337d 100644
> >>> --- a/security/bpf/Makefile
> >>> +++ b/security/bpf/Makefile
> >>> @@ -2,4 +2,4 @@
> >>>  #
> >>>  # Copyright 2019 Google LLC.
> >>>  
> >>> -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
> >>> +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
> >>> diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
> >>> new file mode 100644
> >>> index 000000000000..b123d9cb4cd4
> >>> --- /dev/null
> >>> +++ b/security/bpf/hooks.c
> >>> @@ -0,0 +1,20 @@
> >>> +// SPDX-License-Identifier: GPL-2.0
> >>> +
> >>> +/*
> >>> + * Copyright 2019 Google LLC.
> >>> + */
> >>> +
> >>> +#include <linux/bpf_lsm.h>
> >>> +#include <linux/srcu.h>
> >>> +
> >>> +DEFINE_STATIC_SRCU(security_hook_srcu);
> >>> +
> >>> +int bpf_lsm_srcu_read_lock(void)
> >>> +{
> >>> +	return srcu_read_lock(&security_hook_srcu);
> >>> +}
> >>> +
> >>> +void bpf_lsm_srcu_read_unlock(int idx)
> >>> +{
> >>> +	return srcu_read_unlock(&security_hook_srcu, idx);
> >>> +}
> >>> diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
> >>> index dc9ac03c7aa0..a25a068e1781 100644
> >>> --- a/security/bpf/lsm.c
> >>> +++ b/security/bpf/lsm.c
> >>> @@ -4,6 +4,7 @@
> >>>   * Copyright 2019 Google LLC.
> >>>   */
> >>>  
> >>> +#include <linux/bpf_lsm.h>
> >>>  #include <linux/lsm_hooks.h>
> >>>  
> >>>  /* This is only for internal hooks, always statically shipped as part of the
> >>> @@ -12,6 +13,12 @@
> >>>   */
> >>>  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
> >>>  
> >>> +/* Security hooks registered dynamically by the BPF LSM and must be accessed
> >>> + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
> >>> + * hooks dynamically allocated by the BPF LSM are appended here.
> >>> + */
> >>> +struct security_hook_heads bpf_lsm_hook_heads;
> >>> +
> >>>  static int __init bpf_lsm_init(void)
> >>>  {
> >>>  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
> >>> diff --git a/security/security.c b/security/security.c
> >>> index 30a8aa700557..95a46ca25dcd 100644
> >>> --- a/security/security.c
> >>> +++ b/security/security.c
> >>> @@ -27,6 +27,7 @@
> >>>  #include <linux/backing-dev.h>
> >>>  #include <linux/string.h>
> >>>  #include <linux/msg.h>
> >>> +#include <linux/bpf_lsm.h>
> >>>  #include <net/flow.h>
> >>>  
> >>>  #define MAX_LSM_EVM_XATTR	2
> >>> @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
> >>>  								\
> >>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
> >>>  			P->hook.FUNC(__VA_ARGS__);		\
> >>> +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
> >> I'm sorry if I wasn't clear on the v2 review.
> >> This does not belong in the infrastructure. You should be
> >> doing all the bpf_lsm hook processing in you module.
> >> bpf_lsm_task_alloc() should loop though all the bpf
> >> task_alloc hooks if they have to be handled differently
> >> from "normal" LSM hooks.
> > The BPF LSM does not define static hooks (the ones registered to
> > security_hook_heads in security.c with __lsm_ro_after_init) for each
> > LSM hook. If it tries to do that one ends with what was in v1:
> >
> >   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org
> >
> > This gets quite ugly (security/bpf/hooks.h from v1) and was noted by
> > the BPF maintainers:
> >
> >   https://lore.kernel.org/bpf/20191222012722.gdqhppxpfmqfqbld@ast-mbp.dhcp.thefacebook.com/
> >
> > As I mentioned, some of the ideas we used here are based on:
> >
> >   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
> >
> > Which gave each LSM the ability to add mutable hooks at runtime. If
> > you prefer we can make this generic and allow the LSMs to register
> > mutable hooks with the BPF LSM be the only LSM that uses it (and
> > enforce it with a whitelist).
> >
> > Would this generic approach be something you would consider better
> > than just calling the BPF mutable hooks directly?
> 
> What I think makes sense is for the BPF LSM to have a hook
> for each of the interfaces and for that hook to handle the
> mutable list for the interface. If BPF is not included there
> will be no mutable hooks. 
> 
> Yes, your v1 got this right.

BPF LSM does provide mutable LSM hooks and it ends up being simpler
to implement/maintain when they are treated as such.

 The other approaches which we have considered are:

- Using macro magic to allocate static hook bodies which call eBPF
  programs as implemented in v1. This entails maintaining a
  separate list of LSM hooks in the BPF LSM which is evident from the
  giant security/bpf/include/hooks.h in:

  https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org

- Another approach one can think of is to allocate all the trampoline
  images (one page each) at __init and update these images to invoke
  BPF programs when they are attached.

Both these approaches seem to suffer from the downside of doing more
work when it's not really needed (i.e. doing prep work for hooks which
have no eBPF programs attached) and they appear to mask the fact
that what the BPF LSM provides is actually mutable LSM hooks by
allocating static wrappers around mutable callbacks.

Are there other downsides apart from the fact we have an explicit call
to the mutable hooks in the LSM code? (Note that we want to have these
mutable hooks run after all the static LSM hooks so ordering
would still end up being LSM_ORDER_LAST)

It would be great to hear the maintainers' perspective based on the
trade-offs involved with the different approaches discussed.

We are happy to adapt our approach based on the consensus we reach
here.

- KP

> 
> >
> > - KP
> >
> >>>  	} while (0)
> >>>  
> >>> -#define call_int_hook(FUNC, IRC, ...) ({			\
> >>> -	int RC = IRC;						\
> >>> -	do {							\
> >>> -		struct security_hook_list *P;			\
> >>> -								\
> >>> +#define call_int_hook(FUNC, IRC, ...) ({				\
> >>> +	int RC = IRC;							\
> >>> +	do {								\
> >>> +		struct security_hook_list *P;				\
> >>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
> >>> -			RC = P->hook.FUNC(__VA_ARGS__);		\
> >>> -			if (RC != 0)				\
> >>> -				break;				\
> >>> -		}						\
> >>> -	} while (0);						\
> >>> -	RC;							\
> >>> +			RC = P->hook.FUNC(__VA_ARGS__);			\
> >>> +			if (RC != 0)					\
> >>> +				break;					\
> >>> +		}							\
> >>> +		if (RC == 0)						\
> >>> +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
> >>> +	} while (0);							\
> >>> +	RC;								\
> >>>  })
> >>>  
> >>>  /* Security operations */
>
Casey Schaufler Jan. 23, 2020, 11:50 p.m. UTC | #5
On 1/23/2020 2:24 PM, KP Singh wrote:
> On 23-Jan 11:09, Casey Schaufler wrote:
>> On 1/23/2020 9:59 AM, KP Singh wrote:
>>> On 23-Jan 09:03, Casey Schaufler wrote:
>>>> On 1/23/2020 7:24 AM, KP Singh wrote:
>>>>> From: KP Singh <kpsingh@google.com>
>>>>>
>>>>> - The list of hooks registered by an LSM is currently immutable as they
>>>>>   are declared with __lsm_ro_after_init and they are attached to a
>>>>>   security_hook_heads struct.
>>>>> - For the BPF LSM we need to de/register the hooks at runtime. Making
>>>>>   the existing security_hook_heads mutable broadens an
>>>>>   attack vector, so a separate security_hook_heads is added for only
>>>>>   those that ~must~ be mutable.
>>>>> - These mutable hooks are run only after all the static hooks have
>>>>>   successfully executed.
>>>>>
>>>>> This is based on the ideas discussed in:
>>>>>
>>>>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
>>>>>
>>>>> Reviewed-by: Brendan Jackman <jackmanb@google.com>
>>>>> Reviewed-by: Florent Revest <revest@google.com>
>>>>> Reviewed-by: Thomas Garnier <thgarnie@google.com>
>>>>> Signed-off-by: KP Singh <kpsingh@google.com>
>>>>> ---
>>>>>  MAINTAINERS             |  1 +
>>>>>  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
>>>>>  security/bpf/Kconfig    |  1 +
>>>>>  security/bpf/Makefile   |  2 +-
>>>>>  security/bpf/hooks.c    | 20 ++++++++++++
>>>>>  security/bpf/lsm.c      |  7 ++++
>>>>>  security/security.c     | 25 +++++++-------
>>>>>  7 files changed, 116 insertions(+), 12 deletions(-)
>>>>>  create mode 100644 include/linux/bpf_lsm.h
>>>>>  create mode 100644 security/bpf/hooks.c
>>>>>
>>>>> diff --git a/MAINTAINERS b/MAINTAINERS
>>>>> index e2b7f76a1a70..c606b3d89992 100644
>>>>> --- a/MAINTAINERS
>>>>> +++ b/MAINTAINERS
>>>>> @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
>>>>>  L:	bpf@vger.kernel.org
>>>>>  S:	Maintained
>>>>>  F:	security/bpf/
>>>>> +F:	include/linux/bpf_lsm.h
>>>>>  
>>>>>  BROADCOM B44 10/100 ETHERNET DRIVER
>>>>>  M:	Michael Chan <michael.chan@broadcom.com>
>>>>> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
>>>>> new file mode 100644
>>>>> index 000000000000..57c20b2cd2f4
>>>>> --- /dev/null
>>>>> +++ b/include/linux/bpf_lsm.h
>>>>> @@ -0,0 +1,72 @@
>>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>>> +
>>>>> +/*
>>>>> + * Copyright 2019 Google LLC.
>>>>> + */
>>>>> +
>>>>> +#ifndef _LINUX_BPF_LSM_H
>>>>> +#define _LINUX_BPF_LSM_H
>>>>> +
>>>>> +#include <linux/bpf.h>
>>>>> +#include <linux/lsm_hooks.h>
>>>>> +
>>>>> +#ifdef CONFIG_SECURITY_BPF
>>>>> +
>>>>> +/* Mutable hooks defined at runtime and executed after all the statically
>>>>> + * defined LSM hooks.
>>>>> + */
>>>>> +extern struct security_hook_heads bpf_lsm_hook_heads;
>>>>> +
>>>>> +int bpf_lsm_srcu_read_lock(void);
>>>>> +void bpf_lsm_srcu_read_unlock(int idx);
>>>>> +
>>>>> +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
>>>>> +	do {							\
>>>>> +		struct security_hook_list *P;			\
>>>>> +		int _idx;					\
>>>>> +								\
>>>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
>>>>> +			break;					\
>>>>> +								\
>>>>> +		_idx = bpf_lsm_srcu_read_lock();		\
>>>>> +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
>>>>> +			P->hook.FUNC(__VA_ARGS__);		\
>>>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
>>>>> +	} while (0)
>>>>> +
>>>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
>>>>> +	int _ret = 0;						\
>>>>> +	do {							\
>>>>> +		struct security_hook_list *P;			\
>>>>> +		int _idx;					\
>>>>> +								\
>>>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
>>>>> +			break;					\
>>>>> +								\
>>>>> +		_idx = bpf_lsm_srcu_read_lock();		\
>>>>> +								\
>>>>> +		hlist_for_each_entry(P,				\
>>>>> +			&bpf_lsm_hook_heads.FUNC, list) {	\
>>>>> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
>>>>> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
>>>>> +				break;				\
>>>>> +		}						\
>>>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
>>>>> +	} while (0);						\
>>>>> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
>>>>> +})
>>>>> +
>>>>> +#else /* !CONFIG_SECURITY_BPF */
>>>>> +
>>>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
>>>>> +#define CALL_BPF_LSM_VOID_HOOKS(...)
>>>>> +
>>>>> +static inline int bpf_lsm_srcu_read_lock(void)
>>>>> +{
>>>>> +	return 0;
>>>>> +}
>>>>> +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
>>>>> +
>>>>> +#endif /* CONFIG_SECURITY_BPF */
>>>>> +
>>>>> +#endif /* _LINUX_BPF_LSM_H */
>>>>> diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
>>>>> index a5f6c67ae526..595e4ad597ae 100644
>>>>> --- a/security/bpf/Kconfig
>>>>> +++ b/security/bpf/Kconfig
>>>>> @@ -6,6 +6,7 @@ config SECURITY_BPF
>>>>>  	bool "BPF-based MAC and audit policy"
>>>>>  	depends on SECURITY
>>>>>  	depends on BPF_SYSCALL
>>>>> +	depends on SRCU
>>>>>  	help
>>>>>  	  This enables instrumentation of the security hooks with
>>>>>  	  eBPF programs.
>>>>> diff --git a/security/bpf/Makefile b/security/bpf/Makefile
>>>>> index c78a8a056e7e..c526927c337d 100644
>>>>> --- a/security/bpf/Makefile
>>>>> +++ b/security/bpf/Makefile
>>>>> @@ -2,4 +2,4 @@
>>>>>  #
>>>>>  # Copyright 2019 Google LLC.
>>>>>  
>>>>> -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
>>>>> +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
>>>>> diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
>>>>> new file mode 100644
>>>>> index 000000000000..b123d9cb4cd4
>>>>> --- /dev/null
>>>>> +++ b/security/bpf/hooks.c
>>>>> @@ -0,0 +1,20 @@
>>>>> +// SPDX-License-Identifier: GPL-2.0
>>>>> +
>>>>> +/*
>>>>> + * Copyright 2019 Google LLC.
>>>>> + */
>>>>> +
>>>>> +#include <linux/bpf_lsm.h>
>>>>> +#include <linux/srcu.h>
>>>>> +
>>>>> +DEFINE_STATIC_SRCU(security_hook_srcu);
>>>>> +
>>>>> +int bpf_lsm_srcu_read_lock(void)
>>>>> +{
>>>>> +	return srcu_read_lock(&security_hook_srcu);
>>>>> +}
>>>>> +
>>>>> +void bpf_lsm_srcu_read_unlock(int idx)
>>>>> +{
>>>>> +	return srcu_read_unlock(&security_hook_srcu, idx);
>>>>> +}
>>>>> diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
>>>>> index dc9ac03c7aa0..a25a068e1781 100644
>>>>> --- a/security/bpf/lsm.c
>>>>> +++ b/security/bpf/lsm.c
>>>>> @@ -4,6 +4,7 @@
>>>>>   * Copyright 2019 Google LLC.
>>>>>   */
>>>>>  
>>>>> +#include <linux/bpf_lsm.h>
>>>>>  #include <linux/lsm_hooks.h>
>>>>>  
>>>>>  /* This is only for internal hooks, always statically shipped as part of the
>>>>> @@ -12,6 +13,12 @@
>>>>>   */
>>>>>  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
>>>>>  
>>>>> +/* Security hooks registered dynamically by the BPF LSM and must be accessed
>>>>> + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
> >>>>> + * hooks dynamically allocated by the BPF LSM are appended here.
>>>>> + */
>>>>> +struct security_hook_heads bpf_lsm_hook_heads;
>>>>> +
>>>>>  static int __init bpf_lsm_init(void)
>>>>>  {
>>>>>  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
>>>>> diff --git a/security/security.c b/security/security.c
>>>>> index 30a8aa700557..95a46ca25dcd 100644
>>>>> --- a/security/security.c
>>>>> +++ b/security/security.c
>>>>> @@ -27,6 +27,7 @@
>>>>>  #include <linux/backing-dev.h>
>>>>>  #include <linux/string.h>
>>>>>  #include <linux/msg.h>
>>>>> +#include <linux/bpf_lsm.h>
>>>>>  #include <net/flow.h>
>>>>>  
>>>>>  #define MAX_LSM_EVM_XATTR	2
>>>>> @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
>>>>>  								\
>>>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
>>>>>  			P->hook.FUNC(__VA_ARGS__);		\
>>>>> +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
>>>> I'm sorry if I wasn't clear on the v2 review.
>>>> This does not belong in the infrastructure. You should be
>>>> doing all the bpf_lsm hook processing in you module.
>>>> bpf_lsm_task_alloc() should loop though all the bpf
>>>> task_alloc hooks if they have to be handled differently
>>>> from "normal" LSM hooks.
>>> The BPF LSM does not define static hooks (the ones registered to
>>> security_hook_heads in security.c with __lsm_ro_after_init) for each
>>> LSM hook. If it tries to do that one ends with what was in v1:
>>>
>>>   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org
>>>
>>> This gets quite ugly (security/bpf/hooks.h from v1) and was noted by
>>> the BPF maintainers:
>>>
>>>   https://lore.kernel.org/bpf/20191222012722.gdqhppxpfmqfqbld@ast-mbp.dhcp.thefacebook.com/
>>>
>>> As I mentioned, some of the ideas we used here are based on:
>>>
>>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
>>>
>>> Which gave each LSM the ability to add mutable hooks at runtime. If
>>> you prefer we can make this generic and allow the LSMs to register
>>> mutable hooks with the BPF LSM be the only LSM that uses it (and
>>> enforce it with a whitelist).
>>>
>>> Would this generic approach be something you would consider better
>>> than just calling the BPF mutable hooks directly?
>> What I think makes sense is for the BPF LSM to have a hook
>> for each of the interfaces and for that hook to handle the
> >> mutable list for the interface. If BPF is not included there
>> will be no mutable hooks. 
>>
>> Yes, your v1 got this right.
> BPF LSM does provide mutable LSM hooks and it ends up being simpler
> to implement/maintain when they are treated as such.

If you want to put mutable hook handling in the infrastructure
you need to make it general mutable hook handling as opposed to
BPF hook handling. I don't know if that would be acceptable for
all the reasons called out about dynamic module loading.

>
>  The other approaches which we have considered are:
>
> - Using macro magic to allocate static hook bodies which call eBPF
>   programs as implemented in v1. This entails maintaining a
>   separate list of LSM hooks in the BPF LSM which is evident from the
>   giant security/bpf/include/hooks.h in:
>
>   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org

> I haven't put much thought into how you might make that cleaner,
but I don't see how you can expect any approach to turn out
smaller than or less ugly than security.c.

>
> - Another approach one can think of is to allocate all the trampoline
>   images (one page each) at __init and update these images to invoke
>   BPF programs when they are attached.
>
> Both these approaches seem to suffer from the downside of doing more
> work when it's not really needed (i.e. doing prep work for hooks which
> > have no eBPF programs attached) and they appear to mask the fact
> that what the BPF LSM provides is actually mutable LSM hooks by
> allocating static wrappers around mutable callbacks.

That's a "feature" of the LSM infrastructure. If you're not using a hook
> you just don't provide one. It is an artifact of your intent of providing
a general extension that requires you provide a hook which may do nothing
for every interface.

>
> Are there other downsides apart from the fact we have an explicit call
> to the mutable hooks in the LSM code? (Note that we want to have these
> mutable hooks run after all the static LSM hooks so ordering
> would still end up being LSM_ORDER_LAST)

My intention when I suggested using LSM_ORDER_LAST was to
remove the explicit calls to BPF in the infrastructure.

>
> It would be great to hear the maintainers' perspective based on the
> trade-offs involved with the different approaches discussed.

Please bear in mind that the maintainer (James Morris) didn't
develop the hook list scheme.

> We are happy to adapt our approach based on the consensus we reach
> here.
>
> - KP
>
>>> - KP
>>>
>>>>>  	} while (0)
>>>>>  
>>>>> -#define call_int_hook(FUNC, IRC, ...) ({			\
>>>>> -	int RC = IRC;						\
>>>>> -	do {							\
>>>>> -		struct security_hook_list *P;			\
>>>>> -								\
>>>>> +#define call_int_hook(FUNC, IRC, ...) ({				\
>>>>> +	int RC = IRC;							\
>>>>> +	do {								\
>>>>> +		struct security_hook_list *P;				\
>>>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
>>>>> -			RC = P->hook.FUNC(__VA_ARGS__);		\
>>>>> -			if (RC != 0)				\
>>>>> -				break;				\
>>>>> -		}						\
>>>>> -	} while (0);						\
>>>>> -	RC;							\
>>>>> +			RC = P->hook.FUNC(__VA_ARGS__);			\
>>>>> +			if (RC != 0)					\
>>>>> +				break;					\
>>>>> +		}							\
>>>>> +		if (RC == 0)						\
>>>>> +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
>>>>> +	} while (0);							\
>>>>> +	RC;								\
>>>>>  })
>>>>>  
>>>>>  /* Security operations */
KP Singh Jan. 24, 2020, 1:25 a.m. UTC | #6
On 23-Jan 15:50, Casey Schaufler wrote:
> On 1/23/2020 2:24 PM, KP Singh wrote:
> > On 23-Jan 11:09, Casey Schaufler wrote:
> >> On 1/23/2020 9:59 AM, KP Singh wrote:
> >>> On 23-Jan 09:03, Casey Schaufler wrote:
> >>>> On 1/23/2020 7:24 AM, KP Singh wrote:
> >>>>> From: KP Singh <kpsingh@google.com>
> >>>>>
> >>>>> - The list of hooks registered by an LSM is currently immutable as they
> >>>>>   are declared with __lsm_ro_after_init and they are attached to a
> >>>>>   security_hook_heads struct.
> >>>>> - For the BPF LSM we need to de/register the hooks at runtime. Making
> >>>>>   the existing security_hook_heads mutable broadens an
> >>>>>   attack vector, so a separate security_hook_heads is added for only
> >>>>>   those that ~must~ be mutable.
> >>>>> - These mutable hooks are run only after all the static hooks have
> >>>>>   successfully executed.
> >>>>>
> >>>>> This is based on the ideas discussed in:
> >>>>>
> >>>>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
> >>>>>
> >>>>> Reviewed-by: Brendan Jackman <jackmanb@google.com>
> >>>>> Reviewed-by: Florent Revest <revest@google.com>
> >>>>> Reviewed-by: Thomas Garnier <thgarnie@google.com>
> >>>>> Signed-off-by: KP Singh <kpsingh@google.com>
> >>>>> ---
> >>>>>  MAINTAINERS             |  1 +
> >>>>>  include/linux/bpf_lsm.h | 72 +++++++++++++++++++++++++++++++++++++++++
> >>>>>  security/bpf/Kconfig    |  1 +
> >>>>>  security/bpf/Makefile   |  2 +-
> >>>>>  security/bpf/hooks.c    | 20 ++++++++++++
> >>>>>  security/bpf/lsm.c      |  7 ++++
> >>>>>  security/security.c     | 25 +++++++-------
> >>>>>  7 files changed, 116 insertions(+), 12 deletions(-)
> >>>>>  create mode 100644 include/linux/bpf_lsm.h
> >>>>>  create mode 100644 security/bpf/hooks.c
> >>>>>
> >>>>> diff --git a/MAINTAINERS b/MAINTAINERS
> >>>>> index e2b7f76a1a70..c606b3d89992 100644
> >>>>> --- a/MAINTAINERS
> >>>>> +++ b/MAINTAINERS
> >>>>> @@ -3209,6 +3209,7 @@ L:	linux-security-module@vger.kernel.org
> >>>>>  L:	bpf@vger.kernel.org
> >>>>>  S:	Maintained
> >>>>>  F:	security/bpf/
> >>>>> +F:	include/linux/bpf_lsm.h
> >>>>>  
> >>>>>  BROADCOM B44 10/100 ETHERNET DRIVER
> >>>>>  M:	Michael Chan <michael.chan@broadcom.com>
> >>>>> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
> >>>>> new file mode 100644
> >>>>> index 000000000000..57c20b2cd2f4
> >>>>> --- /dev/null
> >>>>> +++ b/include/linux/bpf_lsm.h
> >>>>> @@ -0,0 +1,72 @@
> >>>>> +/* SPDX-License-Identifier: GPL-2.0 */
> >>>>> +
> >>>>> +/*
> >>>>> + * Copyright 2019 Google LLC.
> >>>>> + */
> >>>>> +
> >>>>> +#ifndef _LINUX_BPF_LSM_H
> >>>>> +#define _LINUX_BPF_LSM_H
> >>>>> +
> >>>>> +#include <linux/bpf.h>
> >>>>> +#include <linux/lsm_hooks.h>
> >>>>> +
> >>>>> +#ifdef CONFIG_SECURITY_BPF
> >>>>> +
> >>>>> +/* Mutable hooks defined at runtime and executed after all the statically
> >>>>> + * defined LSM hooks.
> >>>>> + */
> >>>>> +extern struct security_hook_heads bpf_lsm_hook_heads;
> >>>>> +
> >>>>> +int bpf_lsm_srcu_read_lock(void);
> >>>>> +void bpf_lsm_srcu_read_unlock(int idx);
> >>>>> +
> >>>>> +#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
> >>>>> +	do {							\
> >>>>> +		struct security_hook_list *P;			\
> >>>>> +		int _idx;					\
> >>>>> +								\
> >>>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> >>>>> +			break;					\
> >>>>> +								\
> >>>>> +		_idx = bpf_lsm_srcu_read_lock();		\
> >>>>> +		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
> >>>>> +			P->hook.FUNC(__VA_ARGS__);		\
> >>>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
> >>>>> +	} while (0)
> >>>>> +
> >>>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> >>>>> +	int _ret = 0;						\
> >>>>> +	do {							\
> >>>>> +		struct security_hook_list *P;			\
> >>>>> +		int _idx;					\
> >>>>> +								\
> >>>>> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> >>>>> +			break;					\
> >>>>> +								\
> >>>>> +		_idx = bpf_lsm_srcu_read_lock();		\
> >>>>> +								\
> >>>>> +		hlist_for_each_entry(P,				\
> >>>>> +			&bpf_lsm_hook_heads.FUNC, list) {	\
> >>>>> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> >>>>> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> >>>>> +				break;				\
> >>>>> +		}						\
> >>>>> +		bpf_lsm_srcu_read_unlock(_idx);			\
> >>>>> +	} while (0);						\
> >>>>> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> >>>>> +})
> >>>>> +
> >>>>> +#else /* !CONFIG_SECURITY_BPF */
> >>>>> +
> >>>>> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
> >>>>> +#define CALL_BPF_LSM_VOID_HOOKS(...)
> >>>>> +
> >>>>> +static inline int bpf_lsm_srcu_read_lock(void)
> >>>>> +{
> >>>>> +	return 0;
> >>>>> +}
> >>>>> +static inline void bpf_lsm_srcu_read_unlock(int idx) {}
> >>>>> +
> >>>>> +#endif /* CONFIG_SECURITY_BPF */
> >>>>> +
> >>>>> +#endif /* _LINUX_BPF_LSM_H */
> >>>>> diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
> >>>>> index a5f6c67ae526..595e4ad597ae 100644
> >>>>> --- a/security/bpf/Kconfig
> >>>>> +++ b/security/bpf/Kconfig
> >>>>> @@ -6,6 +6,7 @@ config SECURITY_BPF
> >>>>>  	bool "BPF-based MAC and audit policy"
> >>>>>  	depends on SECURITY
> >>>>>  	depends on BPF_SYSCALL
> >>>>> +	depends on SRCU
> >>>>>  	help
> >>>>>  	  This enables instrumentation of the security hooks with
> >>>>>  	  eBPF programs.
> >>>>> diff --git a/security/bpf/Makefile b/security/bpf/Makefile
> >>>>> index c78a8a056e7e..c526927c337d 100644
> >>>>> --- a/security/bpf/Makefile
> >>>>> +++ b/security/bpf/Makefile
> >>>>> @@ -2,4 +2,4 @@
> >>>>>  #
> >>>>>  # Copyright 2019 Google LLC.
> >>>>>  
> >>>>> -obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
> >>>>> +obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
> >>>>> diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
> >>>>> new file mode 100644
> >>>>> index 000000000000..b123d9cb4cd4
> >>>>> --- /dev/null
> >>>>> +++ b/security/bpf/hooks.c
> >>>>> @@ -0,0 +1,20 @@
> >>>>> +// SPDX-License-Identifier: GPL-2.0
> >>>>> +
> >>>>> +/*
> >>>>> + * Copyright 2019 Google LLC.
> >>>>> + */
> >>>>> +
> >>>>> +#include <linux/bpf_lsm.h>
> >>>>> +#include <linux/srcu.h>
> >>>>> +
> >>>>> +DEFINE_STATIC_SRCU(security_hook_srcu);
> >>>>> +
> >>>>> +int bpf_lsm_srcu_read_lock(void)
> >>>>> +{
> >>>>> +	return srcu_read_lock(&security_hook_srcu);
> >>>>> +}
> >>>>> +
> >>>>> +void bpf_lsm_srcu_read_unlock(int idx)
> >>>>> +{
> >>>>> +	return srcu_read_unlock(&security_hook_srcu, idx);
> >>>>> +}
> >>>>> diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
> >>>>> index dc9ac03c7aa0..a25a068e1781 100644
> >>>>> --- a/security/bpf/lsm.c
> >>>>> +++ b/security/bpf/lsm.c
> >>>>> @@ -4,6 +4,7 @@
> >>>>>   * Copyright 2019 Google LLC.
> >>>>>   */
> >>>>>  
> >>>>> +#include <linux/bpf_lsm.h>
> >>>>>  #include <linux/lsm_hooks.h>
> >>>>>  
> >>>>>  /* This is only for internal hooks, always statically shipped as part of the
> >>>>> @@ -12,6 +13,12 @@
> >>>>>   */
> >>>>>  static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
> >>>>>  
> >>>>> +/* Security hooks registered dynamically by the BPF LSM and must be accessed
> >>>>> + * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
> >>>>> + * hooks dynamically allocated by the BPF LSM are appended here.
> >>>>> + */
> >>>>> +struct security_hook_heads bpf_lsm_hook_heads;
> >>>>> +
> >>>>>  static int __init bpf_lsm_init(void)
> >>>>>  {
> >>>>>  	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
> >>>>> diff --git a/security/security.c b/security/security.c
> >>>>> index 30a8aa700557..95a46ca25dcd 100644
> >>>>> --- a/security/security.c
> >>>>> +++ b/security/security.c
> >>>>> @@ -27,6 +27,7 @@
> >>>>>  #include <linux/backing-dev.h>
> >>>>>  #include <linux/string.h>
> >>>>>  #include <linux/msg.h>
> >>>>> +#include <linux/bpf_lsm.h>
> >>>>>  #include <net/flow.h>
> >>>>>  
> >>>>>  #define MAX_LSM_EVM_XATTR	2
> >>>>> @@ -657,20 +658,22 @@ static void __init lsm_early_task(struct task_struct *task)
> >>>>>  								\
> >>>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
> >>>>>  			P->hook.FUNC(__VA_ARGS__);		\
> >>>>> +		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
> >>>> I'm sorry if I wasn't clear on the v2 review.
> >>>> This does not belong in the infrastructure. You should be
> >>>> doing all the bpf_lsm hook processing in you module.
> >>>> bpf_lsm_task_alloc() should loop though all the bpf
> >>>> task_alloc hooks if they have to be handled differently
> >>>> from "normal" LSM hooks.
> >>> The BPF LSM does not define static hooks (the ones registered to
> >>> security_hook_heads in security.c with __lsm_ro_after_init) for each
> >>> LSM hook. If it tries to do that one ends with what was in v1:
> >>>
> >>>   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org
> >>>
> >>> This gets quite ugly (security/bpf/hooks.h from v1) and was noted by
> >>> the BPF maintainers:
> >>>
> >>>   https://lore.kernel.org/bpf/20191222012722.gdqhppxpfmqfqbld@ast-mbp.dhcp.thefacebook.com/
> >>>
> >>> As I mentioned, some of the ideas we used here are based on:
> >>>
> >>>   https://lore.kernel.org/lkml/20180408065916.GA2832@ircssh-2.c.rugged-nimbus-611.internal
> >>>
> >>> Which gave each LSM the ability to add mutable hooks at runtime. If
> >>> you prefer we can make this generic and allow the LSMs to register
> >>> mutable hooks with the BPF LSM be the only LSM that uses it (and
> >>> enforce it with a whitelist).
> >>>
> >>> Would this generic approach be something you would consider better
> >>> than just calling the BPF mutable hooks directly?
> >> What I think makes sense is for the BPF LSM to have a hook
> >> for each of the interfaces and for that hook to handle the
> >> mutable list for the interface. If BPF is not included there
> >> will be no mutable hooks. 
> >>
> >> Yes, your v1 got this right.
> > BPF LSM does provide mutable LSM hooks and it ends up being simpler
> > to implement/maintain when they are treated as such.
> 
> If you want to put mutable hook handling in the infrastructure
> you need to make it general mutable hook handling as opposed to
> BPF hook handling. I don't know if that would be acceptable for
> all the reasons called out about dynamic module loading.

We can have generic mutable hook handling and if an LSM doesn't
provide a mutable security_hook_heads, it would not allow dynamic
hooks / dynamic module loading.

So, in practice it will just be the BPF LSM that allows mutable hooks
and the other existing LSMs won't. I guess it will be cleaner than
calling the BPF hooks directly from the LSM code (i.e in security.c)

- KP

> 
> >
> >  The other approaches which we have considered are:
> >
> > - Using macro magic to allocate static hook bodies which call eBPF
> >   programs as implemented in v1. This entails maintaining a
> >   separate list of LSM hooks in the BPF LSM which is evident from the
> >   giant security/bpf/include/hooks.h in:
> >
> >   https://lore.kernel.org/bpf/20191220154208.15895-7-kpsingh@chromium.org
> 
> I haven't put much thought into how you might make that cleaner,
> but I don't see how you can expect any approach to turn out
> smaller than or less ugly than security.c.
> 
> >
> > - Another approach one can think of is to allocate all the trampoline
> >   images (one page each) at __init and update these images to invoke
> >   BPF programs when they are attached.
> >
> > Both these approaches seem to suffer from the downside of doing more
> > work when it's not really needed (i.e. doing prep work for hooks which
> > have no eBPF programs attached) and they appear to mask the fact
> > that what the BPF LSM provides is actually mutable LSM hooks by
> > allocating static wrappers around mutable callbacks.
> 
> That's a "feature" of the LSM infrastructure. If you're not using a hook
> you just don't provide one. It is an artifact of your intent of providing
> a general extension that requires you provide a hook which may do nothing
> for every interface.
> 
> >
> > Are there other downsides apart from the fact we have an explicit call
> > to the mutable hooks in the LSM code? (Note that we want to have these
> > mutable hooks run after all the static LSM hooks so ordering
> > would still end up being LSM_ORDER_LAST)
> 
> My intention when I suggested using LSM_ORDER_LAST was to
> remove the explicit calls to BPF in the infrastructure.
> 
> >
> > It would be great to hear the maintainers' perspective based on the
> > trade-offs involved with the different approaches discussed.
> 
> Please bear in mind that the maintainer (James Morris) didn't
> develop the hook list scheme.
> 
> > We are happy to adapt our approach based on the consensus we reach
> > here.
> >
> > - KP
> >
> >>> - KP
> >>>
> >>>>>  	} while (0)
> >>>>>  
> >>>>> -#define call_int_hook(FUNC, IRC, ...) ({			\
> >>>>> -	int RC = IRC;						\
> >>>>> -	do {							\
> >>>>> -		struct security_hook_list *P;			\
> >>>>> -								\
> >>>>> +#define call_int_hook(FUNC, IRC, ...) ({				\
> >>>>> +	int RC = IRC;							\
> >>>>> +	do {								\
> >>>>> +		struct security_hook_list *P;				\
> >>>>>  		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
> >>>>> -			RC = P->hook.FUNC(__VA_ARGS__);		\
> >>>>> -			if (RC != 0)				\
> >>>>> -				break;				\
> >>>>> -		}						\
> >>>>> -	} while (0);						\
> >>>>> -	RC;							\
> >>>>> +			RC = P->hook.FUNC(__VA_ARGS__);			\
> >>>>> +			if (RC != 0)					\
> >>>>> +				break;					\
> >>>>> +		}							\
> >>>>> +		if (RC == 0)						\
> >>>>> +			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
> >>>>> +	} while (0);							\
> >>>>> +	RC;								\
> >>>>>  })
> >>>>>  
> >>>>>  /* Security operations */
>
James Morris Jan. 24, 2020, 9:55 p.m. UTC | #7
On Thu, 23 Jan 2020, KP Singh wrote:

> 
> > If you want to put mutable hook handling in the infrastructure
> > you need to make it general mutable hook handling as opposed to
> > BPF hook handling. I don't know if that would be acceptable for
> > all the reasons called out about dynamic module loading.
> 
> We can have generic mutable hook handling and if an LSM doesn't
> provide a mutable security_hook_heads, it would not allow dynamic
> hooks / dynamic module loading.
> 
> So, in practice it will just be the BPF LSM that allows mutable hooks
> and the other existing LSMs won't. I guess it will be cleaner than
> calling the BPF hooks directly from the LSM code (i.e in security.c)

I'm inclined to only have mutable hooks for KRSI, not for all LSMs. This 
is a special case and we don't need to provide this for anyone else.

Btw, folks, PLEASE trim replies.
Alexei Starovoitov Feb. 11, 2020, 3:12 a.m. UTC | #8
On Thu, Jan 23, 2020 at 07:24:34AM -0800, KP Singh wrote:
> +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> +	int _ret = 0;						\
> +	do {							\
> +		struct security_hook_list *P;			\
> +		int _idx;					\
> +								\
> +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> +			break;					\
> +								\
> +		_idx = bpf_lsm_srcu_read_lock();		\
> +								\
> +		hlist_for_each_entry(P,				\
> +			&bpf_lsm_hook_heads.FUNC, list) {	\
> +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> +				break;				\
> +		}						\
> +		bpf_lsm_srcu_read_unlock(_idx);			\
> +	} while (0);						\
> +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> +})

This extra CONFIG_SECURITY_BPF_ENFORCE doesn't make sense to me.
Why do all the work for bpf-lsm and ignore return code? Such framework already
exists. For audit only case the user could have kprobed security_*() in
security/security.c and had access to exactly the same data. There is no need
in any of these patches if audit the only use case.
Obviously bpf-lsm has to be capable of making go/no-go decision, so
my preference is to drop this extra kconfig knob.
I think the agreement seen in earlier comments in this thread is that the preferred
choice is to always have bpf-based lsm to be equivalent to LSM_ORDER_LAST. In
such case how about using bpf-trampoline fexit logic instead?
Pros:
- no changes to security/ directory
- no changes to call_int_hook() macro
- patches 4, 5, 6 no longer necessary
- when security is off all security_*() hooks do single
  if (hlist_empty(&bpf_lsm_hook_heads.FUNC)) check.
  With patch 4 there will two such checks. Tiny perf penalty.
  With fexit approach there will be no extra check.
- fexit approach is fast even on kernels compiled with retpoline, since
  its using direct calls
Cons:
- bpf trampoline so far is x86 only and arm64 support is wip

By plugging into fexit I'm proposing to let bpf-lsm prog type modify return
value. Currently bpf-fexit prog type has read-only access to it. Adding write
access is a straightforward verifier change. The bpf progs from patch 9 will
still look exactly the same way:
SEC("lsm/file_mprotect")
int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
            unsigned long reqprot, unsigned long prot) { ... }
The difference is that libbpf will be finding the btf_id of security_file_mprotect()
function and adding fexit trampoline to it instead of going via
security_list_options and its own lsm_hook_idx uapi. I think reusing existing
tracing facilities to attach and multiplex multiple programs is cleaner. More
code reuse. Unified testing of lsm and tracing, etc.
KP Singh Feb. 11, 2020, 12:43 p.m. UTC | #9
On 10-Feb 19:12, Alexei Starovoitov wrote:
> On Thu, Jan 23, 2020 at 07:24:34AM -0800, KP Singh wrote:
> > +#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
> > +	int _ret = 0;						\
> > +	do {							\
> > +		struct security_hook_list *P;			\
> > +		int _idx;					\
> > +								\
> > +		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
> > +			break;					\
> > +								\
> > +		_idx = bpf_lsm_srcu_read_lock();		\
> > +								\
> > +		hlist_for_each_entry(P,				\
> > +			&bpf_lsm_hook_heads.FUNC, list) {	\
> > +			_ret = P->hook.FUNC(__VA_ARGS__);		\
> > +			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
> > +				break;				\
> > +		}						\
> > +		bpf_lsm_srcu_read_unlock(_idx);			\
> > +	} while (0);						\
> > +	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
> > +})
> 
> This extra CONFIG_SECURITY_BPF_ENFORCE doesn't make sense to me.

We found it easier to debug the programs if enforcement is disabled.
But we can remove this option if you feel strongly about it.

> Why do all the work for bpf-lsm and ignore return code? Such framework already
> exists. For audit only case the user could have kprobed security_*() in
> security/security.c and had access to exactly the same data. There is no need
> in any of these patches if audit the only use case.
> Obviously bpf-lsm has to be capable of making go/no-go decision, so
> my preference is to drop this extra kconfig knob.
> I think the agreement seen in earlier comments in this thread is that the preferred
> choice is to always have bpf-based lsm to be equivalent to LSM_ORDER_LAST. In
> such case how about using bpf-trampoline fexit logic instead?

Even if the BPF LSM is LSM_ORDER_LAST this is still different from
adding a trampoline to the exit of the security hooks for a few other
reasons.

> Pros:
> - no changes to security/ directory

* We do need to initialize the BPF LSM as a proper LSM (i.e. in
  security/bpf) because it needs access to security blobs. This is
  only possible statically for now as they should be set after boot
  time to provide guarantees to any helper that uses information in
  security blobs. I have proposed how we could make this dynamic as a
  discussion topic for the BPF conference:

    https://lore.kernel.org/bpf/20200127171516.GA2710@chromium.org

  As you can see from some of the prototype use cases e.g:

    https://github.com/sinkap/linux-krsi/blob/patch/v1/examples/samples/bpf/lsm_detect_exec_unlink.c

  that they do rely on security blobs and that they are key in doing
  meaningful security analysis.

* When using the semantic provided by fexit, the BPF LSM program will
  always be executed and will be able to override / clobber the
  decision of LSMs which appear before it in the ordered list. This
  semantic is very different from what we currently have (i.e. the BPF
  LSM hook is only called if all the other LSMs allow the action) and
  seems to be bypassing the LSM framework.

* Not all security_* wrappers simply call the attached hooks and return
  their exit code and not all of them pass the same arguments to the
  hook e.g. security_bprm_check, security_file_open,
  security_task_alloc to just name a few. Illustrating this further
  using security_task_alloc as an example:

	rc = call_int_hook(task_alloc, 0, task, clone_flags);
	if (unlikely(rc))
		security_task_free(task);
	return rc;

Which means we would leak task_structs in this case. While
call_int_hook is sort of equivalent to the fexit trampoline for most
hooks, it's not really the case for some (quite important) LSM hooks.

- KP

> - no changes to call_int_hook() macro
> - patches 4, 5, 6 no longer necessary
> - when security is off all security_*() hooks do single
>   if (hlist_empty(&bpf_lsm_hook_heads.FUNC)) check.
>   With patch 4 there will two such checks. Tiny perf penalty.
>   With fexit approach there will be no extra check.
> - fexit approach is fast even on kernels compiled with retpoline, since
>   its using direct calls
> Cons:
> - bpf trampoline so far is x86 only and arm64 support is wip
> 
> By plugging into fexit I'm proposing to let bpf-lsm prog type modify return
> value. Currently bpf-fexit prog type has read-only access to it. Adding write
> access is a straightforward verifier change. The bpf progs from patch 9 will
> still look exactly the same way:
> SEC("lsm/file_mprotect")
> int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
>             unsigned long reqprot, unsigned long prot) { ... }
> The difference is that libbpf will be finding the btf_id of security_file_mprotect()
> function and adding fexit trampoline to it instead of going via
> security_list_options and its own lsm_hook_idx uapi. I think reusing existing
> tracing facilities to attach and multiplex multiple programs is cleaner. More
> code reuse. Unified testing of lsm and tracing, etc.
Alexei Starovoitov Feb. 11, 2020, 5:58 p.m. UTC | #10
On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> 
> > Pros:
> > - no changes to security/ directory
> 
> * We do need to initialize the BPF LSM as a proper LSM (i.e. in
>   security/bpf) because it needs access to security blobs. This is
>   only possible statically for now as they should be set after boot
>   time to provide guarantees to any helper that uses information in
>   security blobs. I have proposed how we could make this dynamic as a
>   discussion topic for the BPF conference:
> 
>     https://lore.kernel.org/bpf/20200127171516.GA2710@chromium.org
> 
>   As you can see from some of the prototype use cases e.g:
> 
>     https://github.com/sinkap/linux-krsi/blob/patch/v1/examples/samples/bpf/lsm_detect_exec_unlink.c
> 
>   that they do rely on security blobs and that they are key in doing
>   meaningful security analysis.

above example doesn't use security blob from bpf prog.
Are you referring to
https://github.com/sinkap/linux-krsi/blob/patch/v1/examples/security/bpf/ops.c#L455
Then it's a bpf helper that is using it. And that helper could have been
implemented differently. I think it should be a separate discussion on merits
of such helper, its api, and its implementation.

At the same time I agree that additional scratch space accessible by lsm in
inode->i_security, cred->security and other kernel structs is certainly
necessary, but it's a nack to piggy back on legacy lsm way of doing it. The
implementation of bpf_lsm_blob_sizes.lbs_inode fits one single purpose. It's
fine for builtin LSM where blob sizes and code that uses it lives in one place
in the kernel and being modified at once when need for more space arises. For
bpf progs such approach is a non starter. Progs need to have a flexible amount of
scratch space. Thankfully this problem is not new. It was solved already.
Please see how bpf_sk_storage is implemented. It's a flexible amount of scratch
spaces available to bpf progs that is available in every socket. It's done on
demand. No space is wasted when progs are not using it. Not all sockets has to
have it either. I strongly believe that the same approach should be used for
scratch space in inode, cred, etc. It can be a union of existing 'void
*security' pointer or a new pointer. net/core/bpf_sk_storage.c implementation
is not socket specific. It can be generalized for inode, cred, task, etc.

> * When using the semantic provided by fexit, the BPF LSM program will
>   always be executed and will be able to override / clobber the
>   decision of LSMs which appear before it in the ordered list. This
>   semantic is very different from what we currently have (i.e. the BPF
>   LSM hook is only called if all the other LSMs allow the action) and
>   seems to be bypassing the LSM framework.

If that's a concern it's trivial to add 'if (RC == 0)' check to fexit
trampoline generator specific to lsm progs.

> * Not all security_* wrappers simply call the attached hooks and return
>   their exit code and not all of them pass the same arguments to the
>   hook e.g. security_bprm_check, security_file_open,
>   security_task_alloc to just name a few. Illustrating this further
>   using security_task_alloc as an example:
> 
> 	rc = call_int_hook(task_alloc, 0, task, clone_flags);
> 	if (unlikely(rc))
> 		security_task_free(task);
> 	return rc;
> 
> Which means we would leak task_structs in this case. While
> call_int_hook is sort of equivalent to the fexit trampoline for most
> hooks, it's not really the case for some (quite important) LSM hooks.

let's look at them one by one.
1.
security_bprm_check() calling ima_bprm_check.
I think it's layering violation.
Was it that hard to convince vfs folks to add it in fs/exec.c where
it belongs?

2.
security_file_open() calling fsnotify_perm().
Same layering violation and it's clearly broken.
When CONFIG_SECURITY is not defined:
static inline int security_file_open(struct file *file)
{
        return 0;
}
There is no call to fsnotify_perm().
So fsnotify_open/mkdir/etc() work fine with and without CONFIG_SECURITY,
but fsnotify_perm() events can be lost depending on kconfig.
fsnotify_perm() should be moved in fs/open.c.

3.
security_task_alloc(). hmm.
when CONFIG_SECURITY is enabled and corresponding LSM with
non zero blob_sizes.lbs_task is registered that hook allocates
memory even if task_alloc is empty.
Doing security_task_free() in that hook also looks wrong.
It should have been:
diff --git a/kernel/fork.c b/kernel/fork.c
index ef82feb4bddc..a0d31e781341 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2062,7 +2062,7 @@ static __latent_entropy struct task_struct *copy_process(
        shm_init_task(p);
        retval = security_task_alloc(p, clone_flags);
        if (retval)
-               goto bad_fork_cleanup_audit;
+               goto bad_fork_cleanup_security;
Same issue with security_file_alloc().

I think this layering issues should be fixed, but it's not a blocker for
lsm-bpf to proceed. Using fexit mechanism and bpf_sk_storage generalization is
all that is needed. None of it should touch security/*.
Jann Horn Feb. 11, 2020, 6:44 p.m. UTC | #11
On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
> On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
[...]
> > * When using the semantic provided by fexit, the BPF LSM program will
> >   always be executed and will be able to override / clobber the
> >   decision of LSMs which appear before it in the ordered list. This
> >   semantic is very different from what we currently have (i.e. the BPF
> >   LSM hook is only called if all the other LSMs allow the action) and
> >   seems to be bypassing the LSM framework.
>
> It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> trampoline generator specific to lsm progs.
[...]
> Using fexit mechanism and bpf_sk_storage generalization is
> all that is needed. None of it should touch security/*.

If I understand your suggestion correctly, that seems like a terrible
idea to me from the perspective of inspectability and debuggability.
If at runtime, a function can branch off elsewhere to modify its
decision, I want to see that in the source code. If someone e.g.
changes the parameters or the locking rules around a security hook,
how are they supposed to understand the implications if that happens
through some magic fexit trampoline that is injected at runtime?
Alexei Starovoitov Feb. 11, 2020, 7:09 p.m. UTC | #12
On Tue, Feb 11, 2020 at 07:44:05PM +0100, Jann Horn wrote:
> On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> > On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> [...]
> > > * When using the semantic provided by fexit, the BPF LSM program will
> > >   always be executed and will be able to override / clobber the
> > >   decision of LSMs which appear before it in the ordered list. This
> > >   semantic is very different from what we currently have (i.e. the BPF
> > >   LSM hook is only called if all the other LSMs allow the action) and
> > >   seems to be bypassing the LSM framework.
> >
> > It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> > trampoline generator specific to lsm progs.
> [...]
> > Using fexit mechanism and bpf_sk_storage generalization is
> > all that is needed. None of it should touch security/*.
> 
> If I understand your suggestion correctly, that seems like a terrible
> idea to me from the perspective of inspectability and debuggability.
> If at runtime, a function can branch off elsewhere to modify its
> decision, I want to see that in the source code. If someone e.g.
> changes the parameters or the locking rules around a security hook,
> how are they supposed to understand the implications if that happens
> through some magic fexit trampoline that is injected at runtime?

I'm not following the concern. There is error injection facility that is
heavily used with and without bpf. In this case there is really no difference
whether trampoline is used with direct call or indirect callback via function
pointer. Both will jump to bpf prog. The _source code_ of bpf program will
_always_ be available for humans to examine via "bpftool prog dump" since BTF
is required. So from inspectability and debuggability point of view lsm+bpf
stuff is way more visible than any builtin LSM. At any time people will be able
to see what exactly is running on the system. Assuming folks can read C code.
Jann Horn Feb. 11, 2020, 7:36 p.m. UTC | #13
On Tue, Feb 11, 2020 at 8:09 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
> On Tue, Feb 11, 2020 at 07:44:05PM +0100, Jann Horn wrote:
> > On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
> > <alexei.starovoitov@gmail.com> wrote:
> > > On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> > [...]
> > > > * When using the semantic provided by fexit, the BPF LSM program will
> > > >   always be executed and will be able to override / clobber the
> > > >   decision of LSMs which appear before it in the ordered list. This
> > > >   semantic is very different from what we currently have (i.e. the BPF
> > > >   LSM hook is only called if all the other LSMs allow the action) and
> > > >   seems to be bypassing the LSM framework.
> > >
> > > It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> > > trampoline generator specific to lsm progs.
> > [...]
> > > Using fexit mechanism and bpf_sk_storage generalization is
> > > all that is needed. None of it should touch security/*.
> >
> > If I understand your suggestion correctly, that seems like a terrible
> > idea to me from the perspective of inspectability and debuggability.
> > If at runtime, a function can branch off elsewhere to modify its
> > decision, I want to see that in the source code. If someone e.g.
> > changes the parameters or the locking rules around a security hook,
> > how are they supposed to understand the implications if that happens
> > through some magic fexit trampoline that is injected at runtime?
>
> I'm not following the concern. There is error injection facility that is
> heavily used with and without bpf. In this case there is really no difference
> whether trampoline is used with direct call or indirect callback via function
> pointer. Both will jump to bpf prog. The _source code_ of bpf program will
> _always_ be available for humans to examine via "bpftool prog dump" since BTF
> is required. So from inspectability and debuggability point of view lsm+bpf
> stuff is way more visible than any builtin LSM. At any time people will be able
> to see what exactly is running on the system. Assuming folks can read C code.

You said that you want to use fexit without touching security/, which
AFAIU means that the branch from security_*() to the BPF LSM will be
invisible in the *kernel's* source code unless the reader already
knows about the BPF LSM. But maybe I'm just misunderstanding your
idea.

If a random developer is trying to change the locking rules around
security_blah(), and wants to e.g. figure out whether it's okay to
call that thing with a spinlock held, or whether one of the arguments
is actually used, or stuff like that, the obvious way to verify that
is to follow all the direct and indirect calls made from
security_blah(). It's tedious, but it works, unless something is
hooked up to it in a way that is visible in no way in the source code.

I agree that the way in which the call happens behind the scenes
doesn't matter all that much - I don't really care all that much
whether it's an indirect call, a runtime-patched direct call in inline
assembly, or an fexit hook. What I do care about is that someone
reading through any affected function can immediately see that the
branch exists - in other words, ideally, I'd like it to be something
happening in the method body, but if you think that's unacceptable, I
think there should at least be a function attribute that makes it very
clear what's going on.
Alexei Starovoitov Feb. 11, 2020, 8:10 p.m. UTC | #14
On Tue, Feb 11, 2020 at 08:36:18PM +0100, Jann Horn wrote:
> On Tue, Feb 11, 2020 at 8:09 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> > On Tue, Feb 11, 2020 at 07:44:05PM +0100, Jann Horn wrote:
> > > On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
> > > <alexei.starovoitov@gmail.com> wrote:
> > > > On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> > > [...]
> > > > > * When using the semantic provided by fexit, the BPF LSM program will
> > > > >   always be executed and will be able to override / clobber the
> > > > >   decision of LSMs which appear before it in the ordered list. This
> > > > >   semantic is very different from what we currently have (i.e. the BPF
> > > > >   LSM hook is only called if all the other LSMs allow the action) and
> > > > >   seems to be bypassing the LSM framework.
> > > >
> > > > It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> > > > trampoline generator specific to lsm progs.
> > > [...]
> > > > Using fexit mechanism and bpf_sk_storage generalization is
> > > > all that is needed. None of it should touch security/*.
> > >
> > > If I understand your suggestion correctly, that seems like a terrible
> > > idea to me from the perspective of inspectability and debuggability.
> > > If at runtime, a function can branch off elsewhere to modify its
> > > decision, I want to see that in the source code. If someone e.g.
> > > changes the parameters or the locking rules around a security hook,
> > > how are they supposed to understand the implications if that happens
> > > through some magic fexit trampoline that is injected at runtime?
> >
> > I'm not following the concern. There is error injection facility that is
> > heavily used with and without bpf. In this case there is really no difference
> > whether trampoline is used with direct call or indirect callback via function
> > pointer. Both will jump to bpf prog. The _source code_ of bpf program will
> > _always_ be available for humans to examine via "bpftool prog dump" since BTF
> > is required. So from inspectability and debuggability point of view lsm+bpf
> > stuff is way more visible than any builtin LSM. At any time people will be able
> > to see what exactly is running on the system. Assuming folks can read C code.
> 
> You said that you want to use fexit without touching security/, which
> AFAIU means that the branch from security_*() to the BPF LSM will be
> invisible in the *kernel's* source code unless the reader already
> knows about the BPF LSM. But maybe I'm just misunderstanding your
> idea.
> 
> If a random developer is trying to change the locking rules around
> security_blah(), and wants to e.g. figure out whether it's okay to
> call that thing with a spinlock held, or whether one of the arguments
> is actually used, or stuff like that, the obvious way to verify that
> is to follow all the direct and indirect calls made from
> security_blah(). It's tedious, but it works, unless something is
> hooked up to it in a way that is visible in no way in the source code.
> 
> I agree that the way in which the call happens behind the scenes
> doesn't matter all that much - I don't really care all that much
> whether it's an indirect call, a runtime-patched direct call in inline
> assembly, or an fexit hook. What I do care about is that someone
> reading through any affected function can immediately see that the
> branch exists - in other words, ideally, I'd like it to be something
> happening in the method body, but if you think that's unacceptable, I
> think there should at least be a function attribute that makes it very
> clear what's going on.

Got it. Then let's whitelist them ?
All error injection points are marked with ALLOW_ERROR_INJECTION().
We can do something similar here, but let's do it via BTF and avoid
abusing yet another elf section for this mark.
I think BTF_TYPE_EMIT() should work. Just need to pick explicit enough
name and extensive comment about what is going on.
Locking rules and cleanup around security_blah() shouldn't change though.
Like security_task_alloc() should be paired with security_task_free().
And so on. With bpf_sk_storage like logic the alloc/free of scratch
space will be similar to the way socket and bpf progs deal with it.

Some of the lsm hooks are in critical path. Like security_socket_sendmsg().
retpoline hurts. If we go with indirect calls right now it will be harder to
optimize later. It took us long time to come up with bpf trampoline and build
bpf dispatcher on top of it to remove single indirect call from XDP runtime.
For bpf+lsm would be good to avoid it from the start.
Jann Horn Feb. 11, 2020, 8:33 p.m. UTC | #15
On Tue, Feb 11, 2020 at 9:10 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
> On Tue, Feb 11, 2020 at 08:36:18PM +0100, Jann Horn wrote:
> > On Tue, Feb 11, 2020 at 8:09 PM Alexei Starovoitov
> > <alexei.starovoitov@gmail.com> wrote:
> > > On Tue, Feb 11, 2020 at 07:44:05PM +0100, Jann Horn wrote:
> > > > On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
> > > > <alexei.starovoitov@gmail.com> wrote:
> > > > > On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> > > > [...]
> > > > > > * When using the semantic provided by fexit, the BPF LSM program will
> > > > > >   always be executed and will be able to override / clobber the
> > > > > >   decision of LSMs which appear before it in the ordered list. This
> > > > > >   semantic is very different from what we currently have (i.e. the BPF
> > > > > >   LSM hook is only called if all the other LSMs allow the action) and
> > > > > >   seems to be bypassing the LSM framework.
> > > > >
> > > > > It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> > > > > trampoline generator specific to lsm progs.
> > > > [...]
> > > > > Using fexit mechanism and bpf_sk_storage generalization is
> > > > > all that is needed. None of it should touch security/*.
> > > >
> > > > If I understand your suggestion correctly, that seems like a terrible
> > > > idea to me from the perspective of inspectability and debuggability.
> > > > If at runtime, a function can branch off elsewhere to modify its
> > > > decision, I want to see that in the source code. If someone e.g.
> > > > changes the parameters or the locking rules around a security hook,
> > > > how are they supposed to understand the implications if that happens
> > > > through some magic fexit trampoline that is injected at runtime?
> > >
> > > I'm not following the concern. There is error injection facility that is
> > > heavily used with and without bpf. In this case there is really no difference
> > > whether trampoline is used with direct call or indirect callback via function
> > > pointer. Both will jump to bpf prog. The _source code_ of bpf program will
> > > _always_ be available for humans to examine via "bpftool prog dump" since BTF
> > > is required. So from inspectability and debuggability point of view lsm+bpf
> > > stuff is way more visible than any builtin LSM. At any time people will be able
> > > to see what exactly is running on the system. Assuming folks can read C code.
> >
> > You said that you want to use fexit without touching security/, which
> > AFAIU means that the branch from security_*() to the BPF LSM will be
> > invisible in the *kernel's* source code unless the reader already
> > knows about the BPF LSM. But maybe I'm just misunderstanding your
> > idea.
> >
> > If a random developer is trying to change the locking rules around
> > security_blah(), and wants to e.g. figure out whether it's okay to
> > call that thing with a spinlock held, or whether one of the arguments
> > is actually used, or stuff like that, the obvious way to verify that
> > is to follow all the direct and indirect calls made from
> > security_blah(). It's tedious, but it works, unless something is
> > hooked up to it in a way that is visible in no way in the source code.
> >
> > I agree that the way in which the call happens behind the scenes
> > doesn't matter all that much - I don't really care all that much
> > whether it's an indirect call, a runtime-patched direct call in inline
> > assembly, or an fexit hook. What I do care about is that someone
> > reading through any affected function can immediately see that the
> > branch exists - in other words, ideally, I'd like it to be something
> > happening in the method body, but if you think that's unacceptable, I
> > think there should at least be a function attribute that makes it very
> > clear what's going on.
>
> Got it. Then let's whitelist them ?
> All error injection points are marked with ALLOW_ERROR_INJECTION().
> We can do something similar here, but let's do it via BTF and avoid
> abusing yet another elf section for this mark.
> I think BTF_TYPE_EMIT() should work. Just need to pick explicit enough
> name and extensive comment about what is going on.

Sounds reasonable to me. :)

> Locking rules and cleanup around security_blah() shouldn't change though.
> Like security_task_alloc() should be paired with security_task_free().
> And so on. With bpf_sk_storage like logic the alloc/free of scratch
> space will be similar to the way socket and bpf progs deal with it.
>
> Some of the lsm hooks are in critical path. Like security_socket_sendmsg().
> retpoline hurts. If we go with indirect calls right now it will be harder to
> optimize later. It took us long time to come up with bpf trampoline and build
> bpf dispatcher on top of it to remove single indirect call from XDP runtime.
> For bpf+lsm would be good to avoid it from the start.

Just out of curiosity: Are fexit hooks really much cheaper than indirect calls?

AFAIK ftrace on x86-64 replaces the return pointer for fexit
instrumentation (see prepare_ftrace_return()). So when the function
returns, there is one return misprediction for branching into
return_to_handler(), and then the processor's internal return stack
will probably be misaligned so that after ftrace_return_to_handler()
is done running, all the following returns will also be mispredicted.

So I would've thought that fexit hooks would have at least roughly the
same impact as indirect calls - indirect calls via retpoline do one
mispredicted branch, fexit hooks do at least two AFAICS. But I guess
indirect calls could still be slower if fexit benefits from having all
the mispredicted pointers stored on the cache-hot stack while the
indirect branch target is too infrequently accessed to be in L1D, or
something like that?
Jann Horn Feb. 11, 2020, 9:32 p.m. UTC | #16
On Tue, Feb 11, 2020 at 9:33 PM Jann Horn <jannh@google.com> wrote:
> On Tue, Feb 11, 2020 at 9:10 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> > On Tue, Feb 11, 2020 at 08:36:18PM +0100, Jann Horn wrote:
> > > On Tue, Feb 11, 2020 at 8:09 PM Alexei Starovoitov
> > > <alexei.starovoitov@gmail.com> wrote:
> > > > On Tue, Feb 11, 2020 at 07:44:05PM +0100, Jann Horn wrote:
> > > > > On Tue, Feb 11, 2020 at 6:58 PM Alexei Starovoitov
> > > > > <alexei.starovoitov@gmail.com> wrote:
> > > > > > On Tue, Feb 11, 2020 at 01:43:34PM +0100, KP Singh wrote:
> > > > > [...]
> > > > > > > * When using the semantic provided by fexit, the BPF LSM program will
> > > > > > >   always be executed and will be able to override / clobber the
> > > > > > >   decision of LSMs which appear before it in the ordered list. This
> > > > > > >   semantic is very different from what we currently have (i.e. the BPF
> > > > > > >   LSM hook is only called if all the other LSMs allow the action) and
> > > > > > >   seems to be bypassing the LSM framework.
> > > > > >
> > > > > > It that's a concern it's trivial to add 'if (RC == 0)' check to fexit
> > > > > > trampoline generator specific to lsm progs.
> > > > > [...]
> > > > > > Using fexit mechanism and bpf_sk_storage generalization is
> > > > > > all that is needed. None of it should touch security/*.
[...]
> > Some of the lsm hooks are in critical path. Like security_socket_sendmsg().
> > retpoline hurts. If we go with indirect calls right now it will be harder to
> > optimize later. It took us long time to come up with bpf trampoline and build
> > bpf dispatcher on top of it to remove single indirect call from XDP runtime.
> > For bpf+lsm would be good to avoid it from the start.
>
> Just out of curiosity: Are fexit hooks really much cheaper than indirect calls?
>
> AFAIK ftrace on x86-64 replaces the return pointer for fexit
> instrumentation (see prepare_ftrace_return()). So when the function
> returns, there is one return misprediction for branching into
> return_to_handler(), and then the processor's internal return stack
> will probably be misaligned so that after ftrace_return_to_handler()
> is done running, all the following returns will also be mispredicted.
>
> So I would've thought that fexit hooks would have at least roughly the
> same impact as indirect calls - indirect calls via retpoline do one
> mispredicted branch, fexit hooks do at least two AFAICS. But I guess
> indirect calls could still be slower if fexit benefits from having all
> the mispredicted pointers stored on the cache-hot stack while the
> indirect branch target is too infrequently accessed to be in L1D, or
> something like that?

Ah, kpsingh explained to me that BPF trampolines work differently from
normal ftrace and don't do the return address poking. Still, the
processor's internal call stack will be misaligned by the BPF
trampoline, right? Since there is one more call than return, if e.g.
security_blah() is hooked, then the kernel will probably predict
security_blah+5 as the target when returning from the trampoline to
security_blah's parent, then when returning from security_blah's
parent to the grandparent predict the parent, and so on, right?
Alexei Starovoitov Feb. 11, 2020, 9:38 p.m. UTC | #17
On Tue, Feb 11, 2020 at 09:33:49PM +0100, Jann Horn wrote:
> >
> > Got it. Then let's whitelist them ?
> > All error injection points are marked with ALLOW_ERROR_INJECTION().
> > We can do something similar here, but let's do it via BTF and avoid
> > abusing yet another elf section for this mark.
> > I think BTF_TYPE_EMIT() should work. Just need to pick explicit enough
> > name and extensive comment about what is going on.
> 
> Sounds reasonable to me. :)

awesome :)

> > Locking rules and cleanup around security_blah() shouldn't change though.
> > Like security_task_alloc() should be paired with security_task_free().
> > And so on. With bpf_sk_storage like logic the alloc/free of scratch
> > space will be similar to the way socket and bpf progs deal with it.
> >
> > Some of the lsm hooks are in critical path. Like security_socket_sendmsg().
> > retpoline hurts. If we go with indirect calls right now it will be harder to
> > optimize later. It took us long time to come up with bpf trampoline and build
> > bpf dispatcher on top of it to remove single indirect call from XDP runtime.
> > For bpf+lsm would be good to avoid it from the start.
> 
> Just out of curiosity: Are fexit hooks really much cheaper than indirect calls?
> 
> AFAIK ftrace on x86-64 replaces the return pointer for fexit
> instrumentation (see prepare_ftrace_return()). So when the function
> returns, there is one return misprediction for branching into
> return_to_handler(), and then the processor's internal return stack
> will probably be misaligned so that after ftrace_return_to_handler()
> is done running, all the following returns will also be mispredicted.
> 
> So I would've thought that fexit hooks would have at least roughly the
> same impact as indirect calls - indirect calls via retpoline do one
> mispredicted branch, fexit hooks do at least two AFAICS. But I guess
> indirect calls could still be slower if fexit benefits from having all
> the mispredicted pointers stored on the cache-hot stack while the
> indirect branch target is too infrequently accessed to be in L1D, or
> something like that?

For fexit I've tried ftrace style of overwriting return address in the stack,
but it was slower than "leave; add rsp, 8; ret". So I went with that.
Looks like skipping one frame makes intel return stack prediction work better
than overwriting. I tested on broadwell only though. Later I realized that I
can do "jmp bpf_trampoline" instead of "call bpf_trampoline", so this extra
'add rsp, 8' can be removed and both direct jump and return stack predictors
will be happy. Will be posting this patch soon.

Old perf numbers of fexit vs original vs kprobe:
https://lore.kernel.org/bpf/20191122011515.255371-1-ast@kernel.org/
Alexei Starovoitov Feb. 11, 2020, 11:26 p.m. UTC | #18
On Tue, Feb 11, 2020 at 1:38 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Tue, Feb 11, 2020 at 09:33:49PM +0100, Jann Horn wrote:
> > >
> > > Got it. Then let's whitelist them ?
> > > All error injection points are marked with ALLOW_ERROR_INJECTION().
> > > We can do something similar here, but let's do it via BTF and avoid
> > > abusing yet another elf section for this mark.
> > > I think BTF_TYPE_EMIT() should work. Just need to pick explicit enough
> > > name and extensive comment about what is going on.
> >
> > Sounds reasonable to me. :)
>
> awesome :)

Looks like the kernel already provides this whitelisting.
$ bpftool btf dump file /sys/kernel/btf/vmlinux |grep FUNC|grep '\<security_'
gives the list of all LSM hooks that lsm-bpf will be able to attach to.
There are two exceptions there security_add_hooks() and security_init().
Both are '__init'. Too late for lsm-bpf to touch.
So filtering BTF funcs by 'security_' prefix will be enough.
It should be documented though.
The number of attachable funcs depends on kconfig which is
a nice property and further strengthen the point that
lsm-bpf is very much kernel specific.
We probably should blacklist security_bpf*() hooks though.
Otherwise inception fans will have a field day.
Disallowing bpf with bpf :)
Daniel Borkmann Feb. 12, 2020, 12:09 a.m. UTC | #19
On 2/12/20 12:26 AM, Alexei Starovoitov wrote:
> On Tue, Feb 11, 2020 at 1:38 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
>>
>> On Tue, Feb 11, 2020 at 09:33:49PM +0100, Jann Horn wrote:
>>>>
>>>> Got it. Then let's whitelist them ?
>>>> All error injection points are marked with ALLOW_ERROR_INJECTION().
>>>> We can do something similar here, but let's do it via BTF and avoid
>>>> abusing yet another elf section for this mark.
>>>> I think BTF_TYPE_EMIT() should work. Just need to pick explicit enough
>>>> name and extensive comment about what is going on.
>>>
>>> Sounds reasonable to me. :)
>>
>> awesome :)
> 
> Looks like the kernel already provides this whitelisting.
> $ bpftool btf dump file /sys/kernel/btf/vmlinux |grep FUNC|grep '\<security_'
> gives the list of all LSM hooks that lsm-bpf will be able to attach to.
> There are two exceptions there security_add_hooks() and security_init().
> Both are '__init'. Too late for lsm-bpf to touch.
> So filtering BTF funcs by 'security_' prefix will be enough.
> It should be documented though.
> The number of attachable funcs depends on kconfig which is
> a nice property and further strengthen the point that
> lsm-bpf is very much kernel specific.
> We probably should blacklist security_bpf*() hooks though.

One thing that is not quite clear to me wrt the fexit approach; assuming
we'd whitelist something like security_inode_link():

int security_inode_link(struct dentry *old_dentry, struct inode *dir,
                          struct dentry *new_dentry)
{
         if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
                 return 0;
         return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
}

Would this then mean the BPF prog needs to reimplement above check by
probing old_dentry->d_inode to later ensure its verdict stays 0 there
too, or that such extra code is to be moved to call-sites instead? If
former, what about more complex logic?

Another approach could be to have a special nop inside call_int_hook()
macro which would then get patched to avoid these situations. Somewhat
similar like static keys where it could be defined anywhere in text but
with updating of call_int_hook()'s RC for the verdict.

Thanks,
Daniel
Alexei Starovoitov Feb. 12, 2020, 2:45 a.m. UTC | #20
On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
> 
> Another approach could be to have a special nop inside call_int_hook()
> macro which would then get patched to avoid these situations. Somewhat
> similar like static keys where it could be defined anywhere in text but
> with updating of call_int_hook()'s RC for the verdict.

Sounds nice in theory. I couldn't quite picture how that would look
in the code, so I hacked:
diff --git a/security/security.c b/security/security.c
index 565bc9b67276..ce4bc1e5e26c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/msg.h>
 #include <net/flow.h>
+#include <linux/jump_label.h>

 #define MAX_LSM_EVM_XATTR      2

@@ -678,12 +679,26 @@ static void __init lsm_early_task(struct task_struct *task)
  *     This is a hook that returns a value.
  */

+#define LSM_HOOK_NAME(FUNC) \
+       DEFINE_STATIC_KEY_FALSE(bpf_lsm_key_##FUNC);
+#include <linux/lsm_hook_names.h>
+#undef LSM_HOOK_NAME
+__diag_push();
+__diag_ignore(GCC, 8, "-Wstrict-prototypes", "");
+#define LSM_HOOK_NAME(FUNC) \
+       int bpf_lsm_call_##FUNC() {return 0;}
+#include <linux/lsm_hook_names.h>
+#undef LSM_HOOK_NAME
+__diag_pop();
+
 #define call_void_hook(FUNC, ...)                              \
        do {                                                    \
                struct security_hook_list *P;                   \
                                                                \
                hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
                        P->hook.FUNC(__VA_ARGS__);              \
+               if (static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
+                      (void)bpf_lsm_call_##FUNC(__VA_ARGS__); \
        } while (0)

 #define call_int_hook(FUNC, IRC, ...) ({                       \
@@ -696,6 +711,8 @@ static void __init lsm_early_task(struct task_struct *task)
                        if (RC != 0)                            \
                                break;                          \
                }                                               \
+               if (RC == IRC && static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
+                      RC = bpf_lsm_call_##FUNC(__VA_ARGS__); \
        } while (0);                                            \
        RC;                                                     \
 })

The assembly looks good from correctness and performance points.
union security_list_options can be split into lsm_hook_names.h too
to avoid __diag_ignore. Is that what you have in mind?
I don't see how one can improve call_int_hook() macro without
full refactoring of linux/lsm_hooks.h
imo static_key doesn't have to be there in the first set. We can add this
optimization later.
Daniel Borkmann Feb. 12, 2020, 1:27 p.m. UTC | #21
On 2/12/20 3:45 AM, Alexei Starovoitov wrote:
> On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
>>
>> Another approach could be to have a special nop inside call_int_hook()
>> macro which would then get patched to avoid these situations. Somewhat
>> similar like static keys where it could be defined anywhere in text but
>> with updating of call_int_hook()'s RC for the verdict.
> 
> Sounds nice in theory. I couldn't quite picture how that would look
> in the code, so I hacked:
> diff --git a/security/security.c b/security/security.c
> index 565bc9b67276..ce4bc1e5e26c 100644
> --- a/security/security.c
> +++ b/security/security.c
> @@ -28,6 +28,7 @@
>   #include <linux/string.h>
>   #include <linux/msg.h>
>   #include <net/flow.h>
> +#include <linux/jump_label.h>
> 
>   #define MAX_LSM_EVM_XATTR      2
> 
> @@ -678,12 +679,26 @@ static void __init lsm_early_task(struct task_struct *task)
>    *     This is a hook that returns a value.
>    */
> 
> +#define LSM_HOOK_NAME(FUNC) \
> +       DEFINE_STATIC_KEY_FALSE(bpf_lsm_key_##FUNC);
> +#include <linux/lsm_hook_names.h>
> +#undef LSM_HOOK_NAME
> +__diag_push();
> +__diag_ignore(GCC, 8, "-Wstrict-prototypes", "");
> +#define LSM_HOOK_NAME(FUNC) \
> +       int bpf_lsm_call_##FUNC() {return 0;}
> +#include <linux/lsm_hook_names.h>
> +#undef LSM_HOOK_NAME
> +__diag_pop();
> +
>   #define call_void_hook(FUNC, ...)                              \
>          do {                                                    \
>                  struct security_hook_list *P;                   \
>                                                                  \
>                  hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
>                          P->hook.FUNC(__VA_ARGS__);              \
> +               if (static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> +                      (void)bpf_lsm_call_##FUNC(__VA_ARGS__); \
>          } while (0)
> 
>   #define call_int_hook(FUNC, IRC, ...) ({                       \
> @@ -696,6 +711,8 @@ static void __init lsm_early_task(struct task_struct *task)
>                          if (RC != 0)                            \
>                                  break;                          \
>                  }                                               \
> +               if (RC == IRC && static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> +                      RC = bpf_lsm_call_##FUNC(__VA_ARGS__); \

Nit: the `RC == IRC` test could be moved behind the static_branch_unlikely() so
that it would be bypassed when not enabled.

>          } while (0);                                            \
>          RC;                                                     \
>   })
> 
> The assembly looks good from correctness and performance points.
> union security_list_options can be split into lsm_hook_names.h too
> to avoid __diag_ignore. Is that what you have in mind?
> I don't see how one can improve call_int_hook() macro without
> full refactoring of linux/lsm_hooks.h
> imo static_key doesn't have to be there in the first set. We can add this
> optimization later.

Yes, like the above diff looks good, and then we'd dynamically attach the program
at bpf_lsm_call_##FUNC()'s fexit hook for a direct jump, so all the security_blah()
internals could stay as-is which then might also address Jann's concerns wrt
concrete annotation as well as potential locking changes inside security_blah().
Agree that patching out via static key could be optional but since you were talking
about avoiding indirect jumps..

Thanks,
Daniel
Casey Schaufler Feb. 12, 2020, 3:52 p.m. UTC | #22
On 2/11/2020 6:45 PM, Alexei Starovoitov wrote:
> On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
>> Another approach could be to have a special nop inside call_int_hook()
>> macro which would then get patched to avoid these situations. Somewhat
>> similar like static keys where it could be defined anywhere in text but
>> with updating of call_int_hook()'s RC for the verdict.

Tell me again why you can't register your BPF hooks like all the
other security modules do? You keep reintroducing BPF as a special
case, and I don't see why.

> Sounds nice in theory. I couldn't quite picture how that would look
> in the code, so I hacked:
> diff --git a/security/security.c b/security/security.c
> index 565bc9b67276..ce4bc1e5e26c 100644
> --- a/security/security.c
> +++ b/security/security.c
> @@ -28,6 +28,7 @@
>  #include <linux/string.h>
>  #include <linux/msg.h>
>  #include <net/flow.h>
> +#include <linux/jump_label.h>
>
>  #define MAX_LSM_EVM_XATTR      2
>
> @@ -678,12 +679,26 @@ static void __init lsm_early_task(struct task_struct *task)
>   *     This is a hook that returns a value.
>   */
>
> +#define LSM_HOOK_NAME(FUNC) \
> +       DEFINE_STATIC_KEY_FALSE(bpf_lsm_key_##FUNC);
> +#include <linux/lsm_hook_names.h>
> +#undef LSM_HOOK_NAME
> +__diag_push();
> +__diag_ignore(GCC, 8, "-Wstrict-prototypes", "");
> +#define LSM_HOOK_NAME(FUNC) \
> +       int bpf_lsm_call_##FUNC() {return 0;}
> +#include <linux/lsm_hook_names.h>
> +#undef LSM_HOOK_NAME
> +__diag_pop();
> +
>  #define call_void_hook(FUNC, ...)                              \
>         do {                                                    \
>                 struct security_hook_list *P;                   \
>                                                                 \
>                 hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
>                         P->hook.FUNC(__VA_ARGS__);              \
> +               if (static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> +                      (void)bpf_lsm_call_##FUNC(__VA_ARGS__); \
>         } while (0)
>
>  #define call_int_hook(FUNC, IRC, ...) ({                       \
> @@ -696,6 +711,8 @@ static void __init lsm_early_task(struct task_struct *task)
>                         if (RC != 0)                            \
>                                 break;                          \
>                 }                                               \
> +               if (RC == IRC && static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> +                      RC = bpf_lsm_call_##FUNC(__VA_ARGS__); \
>         } while (0);                                            \
>         RC;                                                     \
>  })
>
> The assembly looks good from correctness and performance points.
> union security_list_options can be split into lsm_hook_names.h too
> to avoid __diag_ignore. Is that what you have in mind?
> I don't see how one can improve call_int_hook() macro without
> full refactoring of linux/lsm_hooks.h
> imo static_key doesn't have to be there in the first set. We can add this
> optimization later.
KP Singh Feb. 12, 2020, 4:04 p.m. UTC | #23
On 12-Feb 14:27, Daniel Borkmann wrote:
> On 2/12/20 3:45 AM, Alexei Starovoitov wrote:
> > On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
> > > 
> > > Another approach could be to have a special nop inside call_int_hook()
> > > macro which would then get patched to avoid these situations. Somewhat
> > > similar like static keys where it could be defined anywhere in text but
> > > with updating of call_int_hook()'s RC for the verdict.
> > 
> > Sounds nice in theory. I couldn't quite picture how that would look
> > in the code, so I hacked:
> > diff --git a/security/security.c b/security/security.c
> > index 565bc9b67276..ce4bc1e5e26c 100644
> > --- a/security/security.c
> > +++ b/security/security.c
> > @@ -28,6 +28,7 @@
> >   #include <linux/string.h>
> >   #include <linux/msg.h>
> >   #include <net/flow.h>
> > +#include <linux/jump_label.h>
> > 
> >   #define MAX_LSM_EVM_XATTR      2
> > 
> > @@ -678,12 +679,26 @@ static void __init lsm_early_task(struct task_struct *task)
> >    *     This is a hook that returns a value.
> >    */
> > 
> > +#define LSM_HOOK_NAME(FUNC) \
> > +       DEFINE_STATIC_KEY_FALSE(bpf_lsm_key_##FUNC);
> > +#include <linux/lsm_hook_names.h>
> > +#undef LSM_HOOK_NAME
> > +__diag_push();
> > +__diag_ignore(GCC, 8, "-Wstrict-prototypes", "");
> > +#define LSM_HOOK_NAME(FUNC) \
> > +       int bpf_lsm_call_##FUNC() {return 0;}
> > +#include <linux/lsm_hook_names.h>
> > +#undef LSM_HOOK_NAME
> > +__diag_pop();
> > +
> >   #define call_void_hook(FUNC, ...)                              \
> >          do {                                                    \
> >                  struct security_hook_list *P;                   \
> >                                                                  \
> >                  hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
> >                          P->hook.FUNC(__VA_ARGS__);              \
> > +               if (static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> > +                      (void)bpf_lsm_call_##FUNC(__VA_ARGS__); \
> >          } while (0)
> > 
> >   #define call_int_hook(FUNC, IRC, ...) ({                       \
> > @@ -696,6 +711,8 @@ static void __init lsm_early_task(struct task_struct *task)
> >                          if (RC != 0)                            \
> >                                  break;                          \
> >                  }                                               \
> > +               if (RC == IRC && static_branch_unlikely(&bpf_lsm_key_##FUNC)) \
> > +                      RC = bpf_lsm_call_##FUNC(__VA_ARGS__); \
> 
> Nit: the `RC == IRC` test could be moved behind the static_branch_unlikely() so
> that it would be bypassed when not enabled.
> 
> >          } while (0);                                            \
> >          RC;                                                     \
> >   })
> > 
> > The assembly looks good from correctness and performance points.
> > union security_list_options can be split into lsm_hook_names.h too
> > to avoid __diag_ignore. Is that what you have in mind?
> > I don't see how one can improve call_int_hook() macro without
> > full refactoring of linux/lsm_hooks.h
> > imo static_key doesn't have to be there in the first set. We can add this
> > optimization later.
> 
> Yes, like the above diff looks good, and then we'd dynamically attach the program
> at bpf_lsm_call_##FUNC()'s fexit hook for a direct jump, so all the security_blah()
> internals could stay as-is which then might also address Jann's concerns wrt
> concrete annotation as well as potential locking changes inside security_blah().
> Agree that patching out via static key could be optional but since you were talking
> about avoiding indirect jumps..

I like this approach as well. Will give it a go and update the
patches. Thanks a lot for your inputs!

- KP

> 
> Thanks,
> Daniel
KP Singh Feb. 12, 2020, 4:26 p.m. UTC | #24
On 12-Feb 07:52, Casey Schaufler wrote:
> On 2/11/2020 6:45 PM, Alexei Starovoitov wrote:
> > On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
> >> Another approach could be to have a special nop inside call_int_hook()
> >> macro which would then get patched to avoid these situations. Somewhat
> >> similar like static keys where it could be defined anywhere in text but
> >> with updating of call_int_hook()'s RC for the verdict.
> 
> Tell me again why you can't register your BPF hooks like all the
> other security modules do? You keep reintroducing BPF as a special
> case, and I don't see why.

I think we tried to answer this in the discussion we had:

 https://lore.kernel.org/bpf/20200123152440.28956-1-kpsingh@chromium.org/T/#meb1eea982e63be0806f9bba58e91160871803752

BPF should not allocate a wrapper (to be statically registered at
init) for each LSM hook and run the programs from within that as this
implies adding overhead across the board for every hook even if
it's never used (i.e. no BPF program is attached to the hook).

We can, with the suggestions discussed here, avoid adding unnecessary
overhead for unused hooks. And, as Alexei mentioned, adding overhead
when not really needed is especially bad for LSM hooks like
sock_sendmsg.

The other LSMs do not provide dynamic / mutable hooks, so it makes
sense for them to register the hooks once at load time.

- KP

> > Sounds nice in theory. I couldn't quite picture how that would look
> > in the code, so I hacked:
> > diff --git a/security/security.c b/security/security.c
> > index 565bc9b67276..ce4bc1e5e26c 100644
> > --- a/security/security.c

[...]
Casey Schaufler Feb. 12, 2020, 6:59 p.m. UTC | #25
On 2/12/2020 8:26 AM, KP Singh wrote:
> On 12-Feb 07:52, Casey Schaufler wrote:
>> On 2/11/2020 6:45 PM, Alexei Starovoitov wrote:
>>> On Wed, Feb 12, 2020 at 01:09:07AM +0100, Daniel Borkmann wrote:
>>>> Another approach could be to have a special nop inside call_int_hook()
>>>> macro which would then get patched to avoid these situations. Somewhat
>>>> similar like static keys where it could be defined anywhere in text but
>>>> with updating of call_int_hook()'s RC for the verdict.
>> Tell me again why you can't register your BPF hooks like all the
>> other security modules do? You keep reintroducing BPF as a special
>> case, and I don't see why.
> I think we tried to answer this in the discussion we had:
>
>  https://lore.kernel.org/bpf/20200123152440.28956-1-kpsingh@chromium.org/T/#meb1eea982e63be0806f9bba58e91160871803752

I understand your arguments, but remain unconvinced.

> BPF should not allocate a wrapper (to be statically regsitered at
> init) for each LSM hook and run the programs from within that as this
> implies adding overhead across the board for every hook even if
> it's never used (i.e. no BPF program is attached to the hook).

SELinux would run faster if it didn't have hooks installed where
there is no policy loaded that would ever fail for them. That's
not the infrastructure's problem.

> We can, with the suggestions discussed here, avoid adding unncessary
> overhead for unused hooks. And, as Alexei mentioned, adding overhead
> when not really needed is especially bad for LSM hooks like
> sock_sendmsg.

You're adding overhead for systems that have BPF built, but not used.

> The other LSMs do not provide dynamic / mutable hooks, so it makes
> sense for them to register the hooks once at load time.

As mentioned above, the hooks may not be mutable, but policy
may make them pointless. That is the security module's problem,
not the infrastructure's.

>
> - KP
>
>>> Sounds nice in theory. I couldn't quite picture how that would look
>>> in the code, so I hacked:
>>> diff --git a/security/security.c b/security/security.c
>>> index 565bc9b67276..ce4bc1e5e26c 100644
>>> --- a/security/security.c
> [...]
diff mbox series

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index e2b7f76a1a70..c606b3d89992 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3209,6 +3209,7 @@  L:	linux-security-module@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
 F:	security/bpf/
+F:	include/linux/bpf_lsm.h
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
new file mode 100644
index 000000000000..57c20b2cd2f4
--- /dev/null
+++ b/include/linux/bpf_lsm.h
@@ -0,0 +1,72 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_LSM_H
+#define _LINUX_BPF_LSM_H
+
+#include <linux/bpf.h>
+#include <linux/lsm_hooks.h>
+
+#ifdef CONFIG_SECURITY_BPF
+
+/* Mutable hooks defined at runtime and executed after all the statically
+ * defined LSM hooks.
+ */
+extern struct security_hook_heads bpf_lsm_hook_heads;
+
+int bpf_lsm_srcu_read_lock(void);
+void bpf_lsm_srcu_read_unlock(int idx);
+
+#define CALL_BPF_LSM_VOID_HOOKS(FUNC, ...)			\
+	do {							\
+		struct security_hook_list *P;			\
+		int _idx;					\
+								\
+		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
+			break;					\
+								\
+		_idx = bpf_lsm_srcu_read_lock();		\
+		hlist_for_each_entry(P, &bpf_lsm_hook_heads.FUNC, list) \
+			P->hook.FUNC(__VA_ARGS__);		\
+		bpf_lsm_srcu_read_unlock(_idx);			\
+	} while (0)
+
+#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) ({			\
+	int _ret = 0;						\
+	do {							\
+		struct security_hook_list *P;			\
+		int _idx;					\
+								\
+		if (hlist_empty(&bpf_lsm_hook_heads.FUNC))	\
+			break;					\
+								\
+		_idx = bpf_lsm_srcu_read_lock();		\
+								\
+		hlist_for_each_entry(P,				\
+			&bpf_lsm_hook_heads.FUNC, list) {	\
+			_ret = P->hook.FUNC(__VA_ARGS__);		\
+			if (_ret && IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE)) \
+				break;				\
+		}						\
+		bpf_lsm_srcu_read_unlock(_idx);			\
+	} while (0);						\
+	IS_ENABLED(CONFIG_SECURITY_BPF_ENFORCE) ? _ret : 0;	\
+})
+
+#else /* !CONFIG_SECURITY_BPF */
+
+#define CALL_BPF_LSM_INT_HOOKS(FUNC, ...) (0)
+#define CALL_BPF_LSM_VOID_HOOKS(...)
+
+static inline int bpf_lsm_srcu_read_lock(void)
+{
+	return 0;
+}
+static inline void bpf_lsm_srcu_read_unlock(int idx) {}
+
+#endif /* CONFIG_SECURITY_BPF */
+
+#endif /* _LINUX_BPF_LSM_H */
diff --git a/security/bpf/Kconfig b/security/bpf/Kconfig
index a5f6c67ae526..595e4ad597ae 100644
--- a/security/bpf/Kconfig
+++ b/security/bpf/Kconfig
@@ -6,6 +6,7 @@  config SECURITY_BPF
 	bool "BPF-based MAC and audit policy"
 	depends on SECURITY
 	depends on BPF_SYSCALL
+	depends on SRCU
 	help
 	  This enables instrumentation of the security hooks with
 	  eBPF programs.
diff --git a/security/bpf/Makefile b/security/bpf/Makefile
index c78a8a056e7e..c526927c337d 100644
--- a/security/bpf/Makefile
+++ b/security/bpf/Makefile
@@ -2,4 +2,4 @@ 
 #
 # Copyright 2019 Google LLC.
 
-obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o
+obj-$(CONFIG_SECURITY_BPF) := lsm.o ops.o hooks.o
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
new file mode 100644
index 000000000000..b123d9cb4cd4
--- /dev/null
+++ b/security/bpf/hooks.c
@@ -0,0 +1,20 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bpf_lsm.h>
+#include <linux/srcu.h>
+
+DEFINE_STATIC_SRCU(security_hook_srcu);
+
+int bpf_lsm_srcu_read_lock(void)
+{
+	return srcu_read_lock(&security_hook_srcu);
+}
+
+void bpf_lsm_srcu_read_unlock(int idx)
+{
+	return srcu_read_unlock(&security_hook_srcu, idx);
+}
diff --git a/security/bpf/lsm.c b/security/bpf/lsm.c
index dc9ac03c7aa0..a25a068e1781 100644
--- a/security/bpf/lsm.c
+++ b/security/bpf/lsm.c
@@ -4,6 +4,7 @@ 
  * Copyright 2019 Google LLC.
  */
 
+#include <linux/bpf_lsm.h>
 #include <linux/lsm_hooks.h>
 
 /* This is only for internal hooks, always statically shipped as part of the
@@ -12,6 +13,12 @@ 
  */
 static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {};
 
+/* Security hooks registered dynamically by the BPF LSM and must be accessed
+ * by holding bpf_lsm_srcu_read_lock and bpf_lsm_srcu_read_unlock. The mutable
+ * hooks dynamically allocated by the BPF LSM are appended here.
+ */
+struct security_hook_heads bpf_lsm_hook_heads;
+
 static int __init bpf_lsm_init(void)
 {
 	security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
diff --git a/security/security.c b/security/security.c
index 30a8aa700557..95a46ca25dcd 100644
--- a/security/security.c
+++ b/security/security.c
@@ -27,6 +27,7 @@ 
 #include <linux/backing-dev.h>
 #include <linux/string.h>
 #include <linux/msg.h>
+#include <linux/bpf_lsm.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR	2
@@ -657,20 +658,22 @@  static void __init lsm_early_task(struct task_struct *task)
 								\
 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
 			P->hook.FUNC(__VA_ARGS__);		\
+		CALL_BPF_LSM_VOID_HOOKS(FUNC, __VA_ARGS__);	\
 	} while (0)
 
-#define call_int_hook(FUNC, IRC, ...) ({			\
-	int RC = IRC;						\
-	do {							\
-		struct security_hook_list *P;			\
-								\
+#define call_int_hook(FUNC, IRC, ...) ({				\
+	int RC = IRC;							\
+	do {								\
+		struct security_hook_list *P;				\
 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
-			RC = P->hook.FUNC(__VA_ARGS__);		\
-			if (RC != 0)				\
-				break;				\
-		}						\
-	} while (0);						\
-	RC;							\
+			RC = P->hook.FUNC(__VA_ARGS__);			\
+			if (RC != 0)					\
+				break;					\
+		}							\
+		if (RC == 0)						\
+			RC = CALL_BPF_LSM_INT_HOOKS(FUNC, __VA_ARGS__);	\
+	} while (0);							\
+	RC;								\
 })
 
 /* Security operations */