
[v2,2/5] security: Count the LSMs enabled at compile time

Message ID 20230616000441.3677441-3-kpsingh@kernel.org (mailing list archive)
State Not Applicable
Series Reduce overhead of LSMs with static calls

Checks

Context Check Description
netdev/tree_selection success Not a local patch, async
bpf/vmtest-bpf-PR success PR summary
bpf/vmtest-bpf-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-VM_Test-3 success Logs for build for s390x with gcc
bpf/vmtest-bpf-VM_Test-4 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-VM_Test-5 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-6 success Logs for set-matrix
bpf/vmtest-bpf-VM_Test-7 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-8 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-VM_Test-9 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-10 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-11 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-12 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-VM_Test-13 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-14 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-15 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-16 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-18 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-19 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-21 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-22 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-23 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-24 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-25 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-26 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-VM_Test-27 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-28 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-VM_Test-29 success Logs for veristat

Commit Message

KP Singh June 16, 2023, 12:04 a.m. UTC
These macros are a clever trick to determine a count of the number of
LSMs that are enabled in the config to ascertain the maximum number of
static calls that need to be configured per LSM hook.

Without this one would need to generate static calls for (number of
possible LSMs * number of LSM hooks) which ends up being quite wasteful
especially when some LSMs are not compiled into the kernel.
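
As a rough illustration (not part of the patch itself): with only
CONFIG_SECURITY, CONFIG_SECURITY_SELINUX and CONFIG_BPF_LSM enabled, the
*_ENABLED macros below leave "1, 1, 1," behind, and the expansion walks
down one numbered helper per argument:

        __LSM_COUNT_EXPAND(1, 1, 1,)
        -> __LSM_COUNT(1, 1, 1,)    /* first arg is "1", pasting picks the next helper */
        -> __LSM_COUNT1_0(1, 1,)
        -> __LSM_COUNT1_1(1,)
        -> __LSM_COUNT1_2()         /* first arg is now empty, pasting picks ... */
        -> __LSM_COUNT_3()          /* ... which is simply the literal 3 */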

Suggested-by: Kui-Feng Lee <sinquersw@gmail.com>
Signed-off-by: KP Singh <kpsingh@kernel.org>
---
 include/linux/lsm_count.h | 131 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 131 insertions(+)
 create mode 100644 include/linux/lsm_count.h

Comments

Casey Schaufler June 16, 2023, 12:38 a.m. UTC | #1
On 6/15/2023 5:04 PM, KP Singh wrote:
> These macros are a clever trick to determine a count of the number of
> LSMs that are enabled in the config to ascertain the maximum number of
> static calls that need to be configured per LSM hook.
>
> Without this one would need to generate static calls for (number of
> possible LSMs * number of LSM hooks) which ends up being quite wasteful
> especially when some LSMs are not compiled into the kernel.
>
> Suggested-by: Kui-Feng Lee <sinquersw@gmail.com>
> Signed-off-by: KP Singh <kpsingh@kernel.org>
> ---
>  include/linux/lsm_count.h | 131 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 131 insertions(+)
>  create mode 100644 include/linux/lsm_count.h
>
> diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
> new file mode 100644
> index 000000000000..818f62ffa723
> --- /dev/null
> +++ b/include/linux/lsm_count.h
> @@ -0,0 +1,131 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +/*
> + * Copyright (C) 2023 Google LLC.
> + */
> +
> +#ifndef __LINUX_LSM_COUNT_H
> +#define __LINUX_LSM_COUNT_H
> +
> +#include <linux/kconfig.h>
> +
> +/*
> + * Macros to count the number of LSMs enabled in the kernel at compile time.
> + */
> +
> +#define __LSM_COUNT_15(x, y...) 15
> +#define __LSM_COUNT_14(x, y...) 14
> +#define __LSM_COUNT_13(x, y...) 13
> +#define __LSM_COUNT_12(x, y...) 12
> +#define __LSM_COUNT_11(x, y...) 11
> +#define __LSM_COUNT_10(x, y...) 10
> +#define __LSM_COUNT_9(x, y...) 9
> +#define __LSM_COUNT_8(x, y...) 8
> +#define __LSM_COUNT_7(x, y...) 7
> +#define __LSM_COUNT_6(x, y...) 6
> +#define __LSM_COUNT_5(x, y...) 5
> +#define __LSM_COUNT_4(x, y...) 4
> +#define __LSM_COUNT_3(x, y...) 3
> +#define __LSM_COUNT_2(x, y...) 2
> +#define __LSM_COUNT_1(x, y...) 1
> +#define __LSM_COUNT_0(x, y...) 0
> +
> +#define __LSM_COUNT1_15(x, y...) __LSM_COUNT ## x ## _15(y)
> +#define __LSM_COUNT1_14(x, y...) __LSM_COUNT ## x ## _14(y)
> +#define __LSM_COUNT1_13(x, y...) __LSM_COUNT ## x ## _13(y)
> +#define __LSM_COUNT1_12(x, y...) __LSM_COUNT ## x ## _12(y)
> +#define __LSM_COUNT1_10(x, y...) __LSM_COUNT ## x ## _11(y)
> +#define __LSM_COUNT1_9(x, y...) __LSM_COUNT ## x ## _10(y)
> +#define __LSM_COUNT1_8(x, y...) __LSM_COUNT ## x ## _9(y)
> +#define __LSM_COUNT1_7(x, y...) __LSM_COUNT ## x ## _8(y)
> +#define __LSM_COUNT1_6(x, y...) __LSM_COUNT ## x ## _7(y)
> +#define __LSM_COUNT1_5(x, y...) __LSM_COUNT ## x ## _6(y)
> +#define __LSM_COUNT1_4(x, y...) __LSM_COUNT ## x ## _5(y)
> +#define __LSM_COUNT1_3(x, y...) __LSM_COUNT ## x ## _4(y)
> +#define __LSM_COUNT1_2(x, y...) __LSM_COUNT ## x ## _3(y)
> +#define __LSM_COUNT1_1(x, y...) __LSM_COUNT ## x ## _2(y)
> +#define __LSM_COUNT1_0(x, y...) __LSM_COUNT ## x ## _1(y)
> +#define __LSM_COUNT(x, y...) __LSM_COUNT ## x ## _0(y)
> +
> +#define __LSM_COUNT_EXPAND(x...) __LSM_COUNT(x)
> +
> +#if IS_ENABLED(CONFIG_SECURITY)
> +#define CAPABILITIES_ENABLED 1,
> +#else
> +#define CAPABILITIES_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
> +#define SELINUX_ENABLED 1,
> +#else
> +#define SELINUX_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_SMACK)
> +#define SMACK_ENABLED 1,
> +#else
> +#define SMACK_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
> +#define APPARMOR_ENABLED 1,
> +#else
> +#define APPARMOR_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
> +#define TOMOYO_ENABLED 1,
> +#else
> +#define TOMOYO_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_YAMA)
> +#define YAMA_ENABLED 1,
> +#else
> +#define YAMA_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
> +#define LOADPIN_ENABLED 1,
> +#else
> +#define LOADPIN_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
> +#define LOCKDOWN_ENABLED 1,
> +#else
> +#define LOCKDOWN_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_BPF_LSM)
> +#define BPF_LSM_ENABLED 1,
> +#else
> +#define BPF_LSM_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_BPF_LSM)
> +#define BPF_LSM_ENABLED 1,
> +#else
> +#define BPF_LSM_ENABLED
> +#endif
> +
> +#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
> +#define LANDLOCK_ENABLED 1,
> +#else
> +#define LANDLOCK_ENABLED
> +#endif
> +
> +#define MAX_LSM_COUNT			\
> +	__LSM_COUNT_EXPAND(		\
> +		CAPABILITIES_ENABLED	\
> +		SELINUX_ENABLED		\
> +		SMACK_ENABLED		\
> +		APPARMOR_ENABLED	\
> +		TOMOYO_ENABLED		\
> +		YAMA_ENABLED		\
> +		LOADPIN_ENABLED		\
> +		LOCKDOWN_ENABLED	\
> +		BPF_LSM_ENABLED		\
> +		LANDLOCK_ENABLED)
> +

Wouldn't the following be simpler? It's from my LSM syscall patchset.
It certainly takes up fewer lines and would be easier to maintain
than the set of macros you've proposed.

+#define LSM_COUNT ( \
+	(IS_ENABLED(CONFIG_SECURITY) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_SELINUX) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_SMACK) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_TOMOYO) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_IMA) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_APPARMOR) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_YAMA) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_LOADPIN) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_SAFESETID) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_BPF_LSM) ? 1 : 0) + \
+	(IS_ENABLED(CONFIG_SECURITY_LANDLOCK) ? 1 : 0))
 

> +#endif  /* __LINUX_LSM_COUNT_H */
Andrii Nakryiko June 16, 2023, 10:27 p.m. UTC | #2
On Thu, Jun 15, 2023 at 5:38 PM Casey Schaufler <casey@schaufler-ca.com> wrote:
>
> On 6/15/2023 5:04 PM, KP Singh wrote:
> > These macros are a clever trick to determine a count of the number of
> > LSMs that are enabled in the config to ascertain the maximum number of
> > static calls that need to be configured per LSM hook.
> >
> > Without this one would need to generate static calls for (number of
> > possible LSMs * number of LSM hooks) which ends up being quite wasteful
> > especially when some LSMs are not compiled into the kernel.
> >
> > Suggested-by: Kui-Feng Lee <sinquersw@gmail.com>
> > Signed-off-by: KP Singh <kpsingh@kernel.org>
> > ---
> >  include/linux/lsm_count.h | 131 ++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 131 insertions(+)
> >  create mode 100644 include/linux/lsm_count.h
> >
> > diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
> > new file mode 100644
> > index 000000000000..818f62ffa723
> > --- /dev/null
> > +++ b/include/linux/lsm_count.h
> > @@ -0,0 +1,131 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +
> > +/*
> > + * Copyright (C) 2023 Google LLC.
> > + */
> > +
> > +#ifndef __LINUX_LSM_COUNT_H
> > +#define __LINUX_LSM_COUNT_H
> > +
> > +#include <linux/kconfig.h>
> > +
> > +/*
> > + * Macros to count the number of LSMs enabled in the kernel at compile time.
> > + */
> > +
> > +#define __LSM_COUNT_15(x, y...) 15
> > +#define __LSM_COUNT_14(x, y...) 14
> > +#define __LSM_COUNT_13(x, y...) 13
> > +#define __LSM_COUNT_12(x, y...) 12
> > +#define __LSM_COUNT_11(x, y...) 11
> > +#define __LSM_COUNT_10(x, y...) 10
> > +#define __LSM_COUNT_9(x, y...) 9
> > +#define __LSM_COUNT_8(x, y...) 8
> > +#define __LSM_COUNT_7(x, y...) 7
> > +#define __LSM_COUNT_6(x, y...) 6
> > +#define __LSM_COUNT_5(x, y...) 5
> > +#define __LSM_COUNT_4(x, y...) 4
> > +#define __LSM_COUNT_3(x, y...) 3
> > +#define __LSM_COUNT_2(x, y...) 2
> > +#define __LSM_COUNT_1(x, y...) 1
> > +#define __LSM_COUNT_0(x, y...) 0
> > +
> > +#define __LSM_COUNT1_15(x, y...) __LSM_COUNT ## x ## _15(y)
> > +#define __LSM_COUNT1_14(x, y...) __LSM_COUNT ## x ## _14(y)
> > +#define __LSM_COUNT1_13(x, y...) __LSM_COUNT ## x ## _13(y)
> > +#define __LSM_COUNT1_12(x, y...) __LSM_COUNT ## x ## _12(y)
> > +#define __LSM_COUNT1_10(x, y...) __LSM_COUNT ## x ## _11(y)
> > +#define __LSM_COUNT1_9(x, y...) __LSM_COUNT ## x ## _10(y)
> > +#define __LSM_COUNT1_8(x, y...) __LSM_COUNT ## x ## _9(y)
> > +#define __LSM_COUNT1_7(x, y...) __LSM_COUNT ## x ## _8(y)
> > +#define __LSM_COUNT1_6(x, y...) __LSM_COUNT ## x ## _7(y)
> > +#define __LSM_COUNT1_5(x, y...) __LSM_COUNT ## x ## _6(y)
> > +#define __LSM_COUNT1_4(x, y...) __LSM_COUNT ## x ## _5(y)
> > +#define __LSM_COUNT1_3(x, y...) __LSM_COUNT ## x ## _4(y)
> > +#define __LSM_COUNT1_2(x, y...) __LSM_COUNT ## x ## _3(y)
> > +#define __LSM_COUNT1_1(x, y...) __LSM_COUNT ## x ## _2(y)
> > +#define __LSM_COUNT1_0(x, y...) __LSM_COUNT ## x ## _1(y)
> > +#define __LSM_COUNT(x, y...) __LSM_COUNT ## x ## _0(y)
> > +
> > +#define __LSM_COUNT_EXPAND(x...) __LSM_COUNT(x)
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY)
> > +#define CAPABILITIES_ENABLED 1,
> > +#else
> > +#define CAPABILITIES_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
> > +#define SELINUX_ENABLED 1,
> > +#else
> > +#define SELINUX_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_SMACK)
> > +#define SMACK_ENABLED 1,
> > +#else
> > +#define SMACK_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
> > +#define APPARMOR_ENABLED 1,
> > +#else
> > +#define APPARMOR_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
> > +#define TOMOYO_ENABLED 1,
> > +#else
> > +#define TOMOYO_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_YAMA)
> > +#define YAMA_ENABLED 1,
> > +#else
> > +#define YAMA_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
> > +#define LOADPIN_ENABLED 1,
> > +#else
> > +#define LOADPIN_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
> > +#define LOCKDOWN_ENABLED 1,
> > +#else
> > +#define LOCKDOWN_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_BPF_LSM)
> > +#define BPF_LSM_ENABLED 1,
> > +#else
> > +#define BPF_LSM_ENABLED
> > +#endif
> > +
> > +#if IS_ENABLED(CONFIG_BPF_LSM)
> > +#define BPF_LSM_ENABLED 1,
> > +#else
> > +#define BPF_LSM_ENABLED
> > +#endif

This is a duplicate that redefines BPF_LSM_ENABLED unnecessarily

> > +
> > +#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
> > +#define LANDLOCK_ENABLED 1,
> > +#else
> > +#define LANDLOCK_ENABLED
> > +#endif
> > +
> > +#define MAX_LSM_COUNT                        \
> > +     __LSM_COUNT_EXPAND(             \
> > +             CAPABILITIES_ENABLED    \
> > +             SELINUX_ENABLED         \
> > +             SMACK_ENABLED           \
> > +             APPARMOR_ENABLED        \
> > +             TOMOYO_ENABLED          \
> > +             YAMA_ENABLED            \
> > +             LOADPIN_ENABLED         \
> > +             LOCKDOWN_ENABLED        \
> > +             BPF_LSM_ENABLED         \
> > +             LANDLOCK_ENABLED)
> > +
>
> Wouldn't the following be simpler? It's from my LSM syscall patchset.

Of course it would be, but unfortunately it doesn't work with the
UNROLL() macro. This MAX_LSM_COUNT has to evaluate to a compile-time
integer *literal* (not any sort of expression), so that UNROLL(N,...)
can do its magic.
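
To make that constraint concrete, here is a toy unroll helper (names are
illustrative only; the real UNROLL() from earlier in the series may look
different):

        #define __PASTE_(a, b)   a ## b
        #define __PASTE(a, b)    __PASTE_(a, b)
        #define __UNROLL_0(MACRO, args...)
        #define __UNROLL_1(MACRO, args...) __UNROLL_0(MACRO, args) MACRO(0, args)
        #define __UNROLL_2(MACRO, args...) __UNROLL_1(MACRO, args) MACRO(1, args)
        #define __UNROLL_3(MACRO, args...) __UNROLL_2(MACRO, args) MACRO(2, args)
        #define UNROLL(N, MACRO, args...)  __PASTE(__UNROLL_, N)(MACRO, args)

UNROLL(MAX_LSM_COUNT, FOO, x) only resolves if MAX_LSM_COUNT expands to a
plain token like 3, so the paste can form __UNROLL_3; with an expression
such as (1 + 1 + 1) there is no __UNROLL_(1 + 1 + 1) to paste together.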


KP, this __LSM_COUNT_EXPAND() is actually doing exactly what already
existing COUNT_ARGS() macro from linux/kernel.h does, which is
implemented way more succinctly:

#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)


The only problem is that:

#define ___COUNT_ARGS(args...) COUNT_ARGS(args)
#define MAX_LSM_COUNT                   \
        ___COUNT_ARGS(                  \
                CAPABILITIES_ENABLED    \
                SELINUX_ENABLED         \
                SMACK_ENABLED           \
                APPARMOR_ENABLED        \
                TOMOYO_ENABLED          \
                YAMA_ENABLED            \
                LOADPIN_ENABLED         \
                LOCKDOWN_ENABLED        \
                BPF_LSM_ENABLED         \
                LANDLOCK_ENABLED)

overcounts by one, because of that trailing comma within each
XXX_ENABLED definition.


But still, instead of a multi-line __LSM_COUNT{,1}_N set of macros, it
might be better to use the COUNT_ARGS trick, but just account for
those trailing commas? E.g., maybe just do a COUNT_COMMAS() macro
which will adjust all the return values by 1 down, except when there
is no comma (still 0).
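
Something along these lines might do it (an untested sketch, good for up
to 12 LSMs):

        /* Returns the number of commas in the (expanded) argument list. */
        #define __COUNT_COMMAS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
        #define COUNT_COMMAS(X...) __COUNT_COMMAS(X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

Because each enabled LSM contributes "1," the comma count equals the LSM
count, and the trailing empty argument no longer skews the result.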

It's pretty minor in the grand scheme of things, but just something
for you to be aware of.


> It certainly takes up fewer lines and would be easier to maintain
> than the set of macros you've proposed.
>
> +#define LSM_COUNT ( \
> +       (IS_ENABLED(CONFIG_SECURITY) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_SELINUX) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_SMACK) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_TOMOYO) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_IMA) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_APPARMOR) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_YAMA) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_LOADPIN) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_SAFESETID) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_BPF_LSM) ? 1 : 0) + \
> +       (IS_ENABLED(CONFIG_SECURITY_LANDLOCK) ? 1 : 0))
>
>
> > +#endif  /* __LINUX_LSM_COUNT_H */
>
KP Singh Sept. 16, 2023, 12:54 a.m. UTC | #3
On Sat, Jun 17, 2023 at 12:27 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Thu, Jun 15, 2023 at 5:38 PM Casey Schaufler <casey@schaufler-ca.com> wrote:
> >
> > On 6/15/2023 5:04 PM, KP Singh wrote:
> > > These macros are a clever trick to determine a count of the number of
> > > LSMs that are enabled in the config to ascertain the maximum number of
> > > static calls that need to be configured per LSM hook.
> > >
> > > Without this one would need to generate static calls for (number of
> > > possible LSMs * number of LSM hooks) which ends up being quite wasteful
> > > especially when some LSMs are not compiled into the kernel.
> > >
> > > Suggested-by: Kui-Feng Lee <sinquersw@gmail.com>
> > > Signed-off-by: KP Singh <kpsingh@kernel.org>
> > > ---
> > >  include/linux/lsm_count.h | 131 ++++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 131 insertions(+)
> > >  create mode 100644 include/linux/lsm_count.h
> > >
> > > diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
> > > new file mode 100644
> > > index 000000000000..818f62ffa723
> > > --- /dev/null
> > > +++ b/include/linux/lsm_count.h
> > > @@ -0,0 +1,131 @@
> > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > +
> > > +/*
> > > + * Copyright (C) 2023 Google LLC.
> > > + */
> > > +
> > > +#ifndef __LINUX_LSM_COUNT_H
> > > +#define __LINUX_LSM_COUNT_H
> > > +
> > > +#include <linux/kconfig.h>
> > > +
> > > +/*
> > > + * Macros to count the number of LSMs enabled in the kernel at compile time.
> > > + */
> > > +
> > > +#define __LSM_COUNT_15(x, y...) 15
> > > +#define __LSM_COUNT_14(x, y...) 14
> > > +#define __LSM_COUNT_13(x, y...) 13
> > > +#define __LSM_COUNT_12(x, y...) 12
> > > +#define __LSM_COUNT_11(x, y...) 11
> > > +#define __LSM_COUNT_10(x, y...) 10
> > > +#define __LSM_COUNT_9(x, y...) 9
> > > +#define __LSM_COUNT_8(x, y...) 8
> > > +#define __LSM_COUNT_7(x, y...) 7
> > > +#define __LSM_COUNT_6(x, y...) 6
> > > +#define __LSM_COUNT_5(x, y...) 5
> > > +#define __LSM_COUNT_4(x, y...) 4
> > > +#define __LSM_COUNT_3(x, y...) 3
> > > +#define __LSM_COUNT_2(x, y...) 2
> > > +#define __LSM_COUNT_1(x, y...) 1
> > > +#define __LSM_COUNT_0(x, y...) 0
> > > +
> > > +#define __LSM_COUNT1_15(x, y...) __LSM_COUNT ## x ## _15(y)
> > > +#define __LSM_COUNT1_14(x, y...) __LSM_COUNT ## x ## _14(y)
> > > +#define __LSM_COUNT1_13(x, y...) __LSM_COUNT ## x ## _13(y)
> > > +#define __LSM_COUNT1_12(x, y...) __LSM_COUNT ## x ## _12(y)
> > > +#define __LSM_COUNT1_10(x, y...) __LSM_COUNT ## x ## _11(y)
> > > +#define __LSM_COUNT1_9(x, y...) __LSM_COUNT ## x ## _10(y)
> > > +#define __LSM_COUNT1_8(x, y...) __LSM_COUNT ## x ## _9(y)
> > > +#define __LSM_COUNT1_7(x, y...) __LSM_COUNT ## x ## _8(y)
> > > +#define __LSM_COUNT1_6(x, y...) __LSM_COUNT ## x ## _7(y)
> > > +#define __LSM_COUNT1_5(x, y...) __LSM_COUNT ## x ## _6(y)
> > > +#define __LSM_COUNT1_4(x, y...) __LSM_COUNT ## x ## _5(y)
> > > +#define __LSM_COUNT1_3(x, y...) __LSM_COUNT ## x ## _4(y)
> > > +#define __LSM_COUNT1_2(x, y...) __LSM_COUNT ## x ## _3(y)
> > > +#define __LSM_COUNT1_1(x, y...) __LSM_COUNT ## x ## _2(y)
> > > +#define __LSM_COUNT1_0(x, y...) __LSM_COUNT ## x ## _1(y)
> > > +#define __LSM_COUNT(x, y...) __LSM_COUNT ## x ## _0(y)
> > > +
> > > +#define __LSM_COUNT_EXPAND(x...) __LSM_COUNT(x)
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY)
> > > +#define CAPABILITIES_ENABLED 1,
> > > +#else
> > > +#define CAPABILITIES_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
> > > +#define SELINUX_ENABLED 1,
> > > +#else
> > > +#define SELINUX_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_SMACK)
> > > +#define SMACK_ENABLED 1,
> > > +#else
> > > +#define SMACK_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
> > > +#define APPARMOR_ENABLED 1,
> > > +#else
> > > +#define APPARMOR_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
> > > +#define TOMOYO_ENABLED 1,
> > > +#else
> > > +#define TOMOYO_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_YAMA)
> > > +#define YAMA_ENABLED 1,
> > > +#else
> > > +#define YAMA_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
> > > +#define LOADPIN_ENABLED 1,
> > > +#else
> > > +#define LOADPIN_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
> > > +#define LOCKDOWN_ENABLED 1,
> > > +#else
> > > +#define LOCKDOWN_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_BPF_LSM)
> > > +#define BPF_LSM_ENABLED 1,
> > > +#else
> > > +#define BPF_LSM_ENABLED
> > > +#endif
> > > +
> > > +#if IS_ENABLED(CONFIG_BPF_LSM)
> > > +#define BPF_LSM_ENABLED 1,
> > > +#else
> > > +#define BPF_LSM_ENABLED
> > > +#endif
>
> This is a duplicate that redefines BPF_LSM_ENABLED unnecessarily
>
> > > +
> > > +#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
> > > +#define LANDLOCK_ENABLED 1,
> > > +#else
> > > +#define LANDLOCK_ENABLED
> > > +#endif
> > > +
> > > +#define MAX_LSM_COUNT                        \
> > > +     __LSM_COUNT_EXPAND(             \
> > > +             CAPABILITIES_ENABLED    \
> > > +             SELINUX_ENABLED         \
> > > +             SMACK_ENABLED           \
> > > +             APPARMOR_ENABLED        \
> > > +             TOMOYO_ENABLED          \
> > > +             YAMA_ENABLED            \
> > > +             LOADPIN_ENABLED         \
> > > +             LOCKDOWN_ENABLED        \
> > > +             BPF_LSM_ENABLED         \
> > > +             LANDLOCK_ENABLED)
> > > +
> >
> > Wouldn't the following be simpler? It's from my LSM syscall patchset.
>
> Of course it would be, but unfortunately it doesn't work with the
> UNROLL() macro. This MAX_LSM_COUNT has to evaluate to a compile-time
> integer *literal* (not any sort of expression), so that UNROLL(N,...)
> can do its magic.
>
>
> KP, this __LSM_COUNT_EXPAND() is actually doing exactly what already
> existing COUNT_ARGS() macro from linux/kernel.h does, which is
> implemented way more succinctly:
>
> #define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
> #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
>
>
> The only problem is that:
>
> #define ___COUNT_ARGS(args...) COUNT_ARGS(args)
> #define MAX_LSM_COUNT                   \
>         ___COUNT_ARGS(                  \
>                 CAPABILITIES_ENABLED    \
>                 SELINUX_ENABLED         \
>                 SMACK_ENABLED           \
>                 APPARMOR_ENABLED        \
>                 TOMOYO_ENABLED          \
>                 YAMA_ENABLED            \
>                 LOADPIN_ENABLED         \
>                 LOCKDOWN_ENABLED        \
>                 BPF_LSM_ENABLED         \
>                 LANDLOCK_ENABLED)
>
> overcounts by one, because of that trailing comma within each
> XXX_ENABLED definition.
>
>
> But still, instead of a multi-line __LSM_COUNT{,1}_N set of macros, it
> might be better to use the COUNT_ARGS trick, but just account for
> those trailing commas? E.g., maybe just do a COUNT_COMMAS() macro
> which will adjust all the return values by 1 down, except when there
> is no comma (still 0).
>
> It's pretty minor in the grand scheme of things, but just something
> for you to be aware of.

I am back and revving this up again (after a hiatus due to health
stuff and then ramping back at work). Apologies for the radio silence
here.

I agree. Also, if you notice, CAPABILITIES_ENABLED is kinda bogus and
CONFIG_SECURITY is used as a proxy for it, so overcounting by 1 is
actually what I need. So, thanks, this makes it much simpler.

^^
(I realized I had replied the above to Andrii and not replied back to the list).
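
Concretely, the simplified version could then look roughly like this
(an untested sketch using COUNT_ARGS() from linux/kernel.h; the per-LSM
*_ENABLED definitions stay as they are, only the counting part changes):

        #define ___LSM_COUNT_EXPAND(args...) COUNT_ARGS(args)

        /*
         * COUNT_ARGS() sees one extra, empty argument because of the
         * trailing commas, so it yields "enabled LSMs + 1" -- and that +1
         * stands in for the capability hooks, which are always present
         * under CONFIG_SECURITY but have no Kconfig option of their own.
         */
        #define MAX_LSM_COUNT                   \
                ___LSM_COUNT_EXPAND(            \
                        SELINUX_ENABLED         \
                        SMACK_ENABLED           \
                        APPARMOR_ENABLED        \
                        TOMOYO_ENABLED          \
                        YAMA_ENABLED            \
                        LOADPIN_ENABLED         \
                        LOCKDOWN_ENABLED        \
                        BPF_LSM_ENABLED         \
                        LANDLOCK_ENABLED)

(The corner case where every optional LSM is disabled would still need a
look.)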

>
>
> > It certainly takes up fewer lines and would be easier to maintain
> > than the set of macros you've proposed.
> >
> > +#define LSM_COUNT ( \
> > +       (IS_ENABLED(CONFIG_SECURITY) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_SELINUX) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_SMACK) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_TOMOYO) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_IMA) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_APPARMOR) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_YAMA) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_LOADPIN) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_SAFESETID) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_BPF_LSM) ? 1 : 0) + \
> > +       (IS_ENABLED(CONFIG_SECURITY_LANDLOCK) ? 1 : 0))
> >
> >
> > > +#endif  /* __LINUX_LSM_COUNT_H */
> >

Patch

diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..818f62ffa723
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,131 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/kconfig.h>
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+#define __LSM_COUNT_15(x, y...) 15
+#define __LSM_COUNT_14(x, y...) 14
+#define __LSM_COUNT_13(x, y...) 13
+#define __LSM_COUNT_12(x, y...) 12
+#define __LSM_COUNT_11(x, y...) 11
+#define __LSM_COUNT_10(x, y...) 10
+#define __LSM_COUNT_9(x, y...) 9
+#define __LSM_COUNT_8(x, y...) 8
+#define __LSM_COUNT_7(x, y...) 7
+#define __LSM_COUNT_6(x, y...) 6
+#define __LSM_COUNT_5(x, y...) 5
+#define __LSM_COUNT_4(x, y...) 4
+#define __LSM_COUNT_3(x, y...) 3
+#define __LSM_COUNT_2(x, y...) 2
+#define __LSM_COUNT_1(x, y...) 1
+#define __LSM_COUNT_0(x, y...) 0
+
+#define __LSM_COUNT1_15(x, y...) __LSM_COUNT ## x ## _15(y)
+#define __LSM_COUNT1_14(x, y...) __LSM_COUNT ## x ## _14(y)
+#define __LSM_COUNT1_13(x, y...) __LSM_COUNT ## x ## _13(y)
+#define __LSM_COUNT1_12(x, y...) __LSM_COUNT ## x ## _12(y)
+#define __LSM_COUNT1_10(x, y...) __LSM_COUNT ## x ## _11(y)
+#define __LSM_COUNT1_9(x, y...) __LSM_COUNT ## x ## _10(y)
+#define __LSM_COUNT1_8(x, y...) __LSM_COUNT ## x ## _9(y)
+#define __LSM_COUNT1_7(x, y...) __LSM_COUNT ## x ## _8(y)
+#define __LSM_COUNT1_6(x, y...) __LSM_COUNT ## x ## _7(y)
+#define __LSM_COUNT1_5(x, y...) __LSM_COUNT ## x ## _6(y)
+#define __LSM_COUNT1_4(x, y...) __LSM_COUNT ## x ## _5(y)
+#define __LSM_COUNT1_3(x, y...) __LSM_COUNT ## x ## _4(y)
+#define __LSM_COUNT1_2(x, y...) __LSM_COUNT ## x ## _3(y)
+#define __LSM_COUNT1_1(x, y...) __LSM_COUNT ## x ## _2(y)
+#define __LSM_COUNT1_0(x, y...) __LSM_COUNT ## x ## _1(y)
+#define __LSM_COUNT(x, y...) __LSM_COUNT ## x ## _0(y)
+
+#define __LSM_COUNT_EXPAND(x...) __LSM_COUNT(x)
+
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#define MAX_LSM_COUNT			\
+	__LSM_COUNT_EXPAND(		\
+		CAPABILITIES_ENABLED	\
+		SELINUX_ENABLED		\
+		SMACK_ENABLED		\
+		APPARMOR_ENABLED	\
+		TOMOYO_ENABLED		\
+		YAMA_ENABLED		\
+		LOADPIN_ENABLED		\
+		LOCKDOWN_ENABLED	\
+		BPF_LSM_ENABLED		\
+		LANDLOCK_ENABLED)
+
+#endif  /* __LINUX_LSM_COUNT_H */