[bpf-next,2/8] bpf: inline map creation logic in map_create() function

Message ID: 20230412043300.360803-3-andrii@kernel.org (mailing list archive)
State: Rejected
Delegated to: Paul Moore
Series: New BPF map and BTF security LSM hooks

Commit Message

Andrii Nakryiko April 12, 2023, 4:32 a.m. UTC
Keep all the relevant generic sanity checks, permission checks, and
creation and initialization logic in one linear piece of code. Currently,
the helper function that handles memory allocation and partial
initialization is split out and sits about 1000 lines higher in the
file, hurting readability.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 kernel/bpf/syscall.c | 54 ++++++++++++++++++--------------------------
 1 file changed, 22 insertions(+), 32 deletions(-)

Comments

Kees Cook April 12, 2023, 5:53 p.m. UTC | #1
On Tue, Apr 11, 2023 at 09:32:54PM -0700, Andrii Nakryiko wrote:
> Keep all the relevant generic sanity checks, permission checks, and
> creation and initialization logic in one linear piece of code. Currently,
> the helper function that handles memory allocation and partial
> initialization is split out and sits about 1000 lines higher in the
> file, hurting readability.

At first glance, this seems like a step in the wrong direction: having a
single-purpose function pulled out of a larger one seems like a good
thing for stuff like unit testing, etc. Unless there's a reason later in
the series for this inlining (which should be called out in the
changelog here), I would say if it is only readability, just move the
function down 1000 lines but leave it a separate function.

-Kees

> 
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  kernel/bpf/syscall.c | 54 ++++++++++++++++++--------------------------
>  1 file changed, 22 insertions(+), 32 deletions(-)
> 
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index c1d268025985..a090737f98ea 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -108,37 +108,6 @@ const struct bpf_map_ops bpf_map_offload_ops = {
>  	.map_mem_usage = bpf_map_offload_map_mem_usage,
>  };
>  
> -static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
> -{
> -	const struct bpf_map_ops *ops;
> -	u32 type = attr->map_type;
> -	struct bpf_map *map;
> -	int err;
> -
> -	if (type >= ARRAY_SIZE(bpf_map_types))
> -		return ERR_PTR(-EINVAL);
> -	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
> -	ops = bpf_map_types[type];
> -	if (!ops)
> -		return ERR_PTR(-EINVAL);
> -
> -	if (ops->map_alloc_check) {
> -		err = ops->map_alloc_check(attr);
> -		if (err)
> -			return ERR_PTR(err);
> -	}
> -	if (attr->map_ifindex)
> -		ops = &bpf_map_offload_ops;
> -	if (!ops->map_mem_usage)
> -		return ERR_PTR(-EINVAL);
> -	map = ops->map_alloc(attr);
> -	if (IS_ERR(map))
> -		return map;
> -	map->ops = ops;
> -	map->map_type = type;
> -	return map;
> -}
> -
>  static void bpf_map_write_active_inc(struct bpf_map *map)
>  {
>  	atomic64_inc(&map->writecnt);
> @@ -1124,7 +1093,9 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
>  /* called via syscall */
>  static int map_create(union bpf_attr *attr)
>  {
> +	const struct bpf_map_ops *ops;
>  	int numa_node = bpf_map_attr_numa_node(attr);
> +	u32 map_type = attr->map_type;
>  	struct btf_field_offs *foffs;
>  	struct bpf_map *map;
>  	int f_flags;
> @@ -1167,9 +1138,28 @@ static int map_create(union bpf_attr *attr)
>  		return -EINVAL;
>  
>  	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
> -	map = find_and_alloc_map(attr);
> +	map_type = attr->map_type;
> +	if (map_type >= ARRAY_SIZE(bpf_map_types))
> +		return -EINVAL;
> +	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
> +	ops = bpf_map_types[map_type];
> +	if (!ops)
> +		return -EINVAL;
> +
> +	if (ops->map_alloc_check) {
> +		err = ops->map_alloc_check(attr);
> +		if (err)
> +			return err;
> +	}
> +	if (attr->map_ifindex)
> +		ops = &bpf_map_offload_ops;
> +	if (!ops->map_mem_usage)
> +		return -EINVAL;
> +	map = ops->map_alloc(attr);
>  	if (IS_ERR(map))
>  		return PTR_ERR(map);
> +	map->ops = ops;
> +	map->map_type = map_type;
>  
>  	err = bpf_obj_name_cpy(map->name, attr->map_name,
>  			       sizeof(attr->map_name));
> -- 
> 2.34.1
>
Andrii Nakryiko April 13, 2023, 12:22 a.m. UTC | #2
On Wed, Apr 12, 2023 at 10:53 AM Kees Cook <keescook@chromium.org> wrote:
>
> On Tue, Apr 11, 2023 at 09:32:54PM -0700, Andrii Nakryiko wrote:
> > Keep all the relevant generic sanity checks, permission checks, and
> > creation and initialization logic in one linear piece of code. Currently,
> > the helper function that handles memory allocation and partial
> > initialization is split out and sits about 1000 lines higher in the
> > file, hurting readability.
>
> At first glance, this seems like a step in the wrong direction: having a
> single-purpose function pulled out of a larger one seems like a good
> thing for stuff like unit testing, etc. Unless there's a reason later in
> the series for this inlining (which should be called out in the
> changelog here), I would say if it is only readability, just move the
> function down 1000 lines but leave it a separate function.

Oh, I should probably clarify this in the commit message. This
function is not that single-purpose, really. It performs some sanity
checking and then allocates and (partially) initializes the BPF map
itself. "Inlining" it makes it possible to perform these sanity
checks first, then do capabilities/security checks, and only if both
pass, allocate and initialize the map. The next patch inserts
(centralizes) all the spread-out capability checks from per-map
custom callbacks into this same function, right before map
allocation and initialization but after validation of parameters.

So yeah, I do take advantage of this in the next patch, because the
LSM hook gets a validated bpf_uattr. I'll call this out more clearly.
It's definitely not just moving code around for no good reason.
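
To make the intended ordering concrete, here is a rough sketch of the
resulting map_create() flow (condensed from the diff below, not a
compilable excerpt; the earlier attr/flags validation is elided, and the
capability/security check is only a placeholder marking where the next
patch's centralized check is meant to go, not its actual code):

static int map_create(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops;
        u32 map_type = attr->map_type;
        struct bpf_map *map;
        int err;

        /* 1. generic sanity checks: validate map_type and attributes */
        if (map_type >= ARRAY_SIZE(bpf_map_types))
                return -EINVAL;
        map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
        ops = bpf_map_types[map_type];
        if (!ops)
                return -EINVAL;
        if (ops->map_alloc_check) {
                err = ops->map_alloc_check(attr);
                if (err)
                        return err;
        }
        if (attr->map_ifindex)
                ops = &bpf_map_offload_ops;
        if (!ops->map_mem_usage)
                return -EINVAL;

        /* 2. (next patch) centralized capability/LSM checks go here,
         *    after parameters are validated but before anything is
         *    allocated
         */

        /* 3. allocate and partially initialize the map */
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);
        map->ops = ops;
        map->map_type = map_type;

        /* ... remaining init: name copy, BTF checks, id, fd, etc. ... */
        return 0; /* the real function returns the new map FD */
}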


>
> -Kees
>
> >
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
> >  kernel/bpf/syscall.c | 54 ++++++++++++++++++--------------------------
> >  1 file changed, 22 insertions(+), 32 deletions(-)
> >
> > diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> > index c1d268025985..a090737f98ea 100644
> > --- a/kernel/bpf/syscall.c
> > +++ b/kernel/bpf/syscall.c
> > @@ -108,37 +108,6 @@ const struct bpf_map_ops bpf_map_offload_ops = {
> >       .map_mem_usage = bpf_map_offload_map_mem_usage,
> >  };
> >
> > -static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
> > -{
> > -     const struct bpf_map_ops *ops;
> > -     u32 type = attr->map_type;
> > -     struct bpf_map *map;
> > -     int err;
> > -
> > -     if (type >= ARRAY_SIZE(bpf_map_types))
> > -             return ERR_PTR(-EINVAL);
> > -     type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
> > -     ops = bpf_map_types[type];
> > -     if (!ops)
> > -             return ERR_PTR(-EINVAL);
> > -
> > -     if (ops->map_alloc_check) {
> > -             err = ops->map_alloc_check(attr);
> > -             if (err)
> > -                     return ERR_PTR(err);
> > -     }
> > -     if (attr->map_ifindex)
> > -             ops = &bpf_map_offload_ops;
> > -     if (!ops->map_mem_usage)
> > -             return ERR_PTR(-EINVAL);
> > -     map = ops->map_alloc(attr);
> > -     if (IS_ERR(map))
> > -             return map;
> > -     map->ops = ops;
> > -     map->map_type = type;
> > -     return map;
> > -}
> > -
> >  static void bpf_map_write_active_inc(struct bpf_map *map)
> >  {
> >       atomic64_inc(&map->writecnt);
> > @@ -1124,7 +1093,9 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
> >  /* called via syscall */
> >  static int map_create(union bpf_attr *attr)
> >  {
> > +     const struct bpf_map_ops *ops;
> >       int numa_node = bpf_map_attr_numa_node(attr);
> > +     u32 map_type = attr->map_type;
> >       struct btf_field_offs *foffs;
> >       struct bpf_map *map;
> >       int f_flags;
> > @@ -1167,9 +1138,28 @@ static int map_create(union bpf_attr *attr)
> >               return -EINVAL;
> >
> >       /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
> > -     map = find_and_alloc_map(attr);
> > +     map_type = attr->map_type;
> > +     if (map_type >= ARRAY_SIZE(bpf_map_types))
> > +             return -EINVAL;
> > +     map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
> > +     ops = bpf_map_types[map_type];
> > +     if (!ops)
> > +             return -EINVAL;
> > +
> > +     if (ops->map_alloc_check) {
> > +             err = ops->map_alloc_check(attr);
> > +             if (err)
> > +                     return err;
> > +     }
> > +     if (attr->map_ifindex)
> > +             ops = &bpf_map_offload_ops;
> > +     if (!ops->map_mem_usage)
> > +             return -EINVAL;
> > +     map = ops->map_alloc(attr);
> >       if (IS_ERR(map))
> >               return PTR_ERR(map);
> > +     map->ops = ops;
> > +     map->map_type = map_type;
> >
> >       err = bpf_obj_name_cpy(map->name, attr->map_name,
> >                              sizeof(attr->map_name));
> > --
> > 2.34.1
> >
>
> --
> Kees Cook

Patch

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c1d268025985..a090737f98ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -108,37 +108,6 @@  const struct bpf_map_ops bpf_map_offload_ops = {
 	.map_mem_usage = bpf_map_offload_map_mem_usage,
 };
 
-static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
-{
-	const struct bpf_map_ops *ops;
-	u32 type = attr->map_type;
-	struct bpf_map *map;
-	int err;
-
-	if (type >= ARRAY_SIZE(bpf_map_types))
-		return ERR_PTR(-EINVAL);
-	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
-	ops = bpf_map_types[type];
-	if (!ops)
-		return ERR_PTR(-EINVAL);
-
-	if (ops->map_alloc_check) {
-		err = ops->map_alloc_check(attr);
-		if (err)
-			return ERR_PTR(err);
-	}
-	if (attr->map_ifindex)
-		ops = &bpf_map_offload_ops;
-	if (!ops->map_mem_usage)
-		return ERR_PTR(-EINVAL);
-	map = ops->map_alloc(attr);
-	if (IS_ERR(map))
-		return map;
-	map->ops = ops;
-	map->map_type = type;
-	return map;
-}
-
 static void bpf_map_write_active_inc(struct bpf_map *map)
 {
 	atomic64_inc(&map->writecnt);
@@ -1124,7 +1093,9 @@  static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
+	const struct bpf_map_ops *ops;
 	int numa_node = bpf_map_attr_numa_node(attr);
+	u32 map_type = attr->map_type;
 	struct btf_field_offs *foffs;
 	struct bpf_map *map;
 	int f_flags;
@@ -1167,9 +1138,28 @@  static int map_create(union bpf_attr *attr)
 		return -EINVAL;
 
 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
-	map = find_and_alloc_map(attr);
+	map_type = attr->map_type;
+	if (map_type >= ARRAY_SIZE(bpf_map_types))
+		return -EINVAL;
+	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
+	ops = bpf_map_types[map_type];
+	if (!ops)
+		return -EINVAL;
+
+	if (ops->map_alloc_check) {
+		err = ops->map_alloc_check(attr);
+		if (err)
+			return err;
+	}
+	if (attr->map_ifindex)
+		ops = &bpf_map_offload_ops;
+	if (!ops->map_mem_usage)
+		return -EINVAL;
+	map = ops->map_alloc(attr);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
+	map->ops = ops;
+	map->map_type = map_type;
 
 	err = bpf_obj_name_cpy(map->name, attr->map_name,
 			       sizeof(attr->map_name));