
[bpf-next,v2,2/2] bpf: allow bpf_current_task_under_cgroup() with BPF_CGROUP_*

Message ID 20240723012827.13280-3-technoboy85@gmail.com (mailing list archive)
State Superseded
Series bpf: enable some functions in cgroup programs

Commit Message

Matteo Croce July 23, 2024, 1:28 a.m. UTC
From: Matteo Croce <teknoraver@meta.com>

The helper bpf_current_task_under_cgroup() is currently only allowed in
tracing programs.
Allow its usage also in the BPF_CGROUP_* program types.
Move the code from kernel/trace/bpf_trace.c to kernel/bpf/helpers.c,
so it also compiles without CONFIG_BPF_EVENTS.

This will be used in systemd-networkd to monitor sysctl writes and to
filter its own writes from those of other processes:
https://github.com/systemd/systemd/pull/32212

Signed-off-by: Matteo Croce <teknoraver@meta.com>
---
 include/linux/bpf.h      |  1 +
 kernel/bpf/cgroup.c      |  2 ++
 kernel/bpf/helpers.c     | 23 +++++++++++++++++++++++
 kernel/trace/bpf_trace.c | 27 ++-------------------------
 4 files changed, 28 insertions(+), 25 deletions(-)
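
For illustration, a minimal sketch of how a BPF_CGROUP_SYSCTL program could use
this helper once it is exposed to cgroup program types. The map and program
names and the policy in the comments are assumptions made for the example, not
code taken from the systemd pull request:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of this patch: a cgroup/sysctl
 * program that uses bpf_current_task_under_cgroup() to tell its own
 * cgroup's sysctl writes apart from everybody else's.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} own_cgroup SEC(".maps");	/* the loader stores a cgroup fd at index 0 */

SEC("cgroup/sysctl")
int filter_sysctl(struct bpf_sysctl *ctx)
{
	/* Returns 1 if current is under the cgroup stored at index 0,
	 * 0 if not, or a negative error (e.g. -EAGAIN if the slot is empty).
	 */
	if (bpf_current_task_under_cgroup(&own_cgroup, 0) == 1)
		return 1;	/* our own write: allow and ignore */

	/* a write coming from some other task: allow it here too,
	 * but this is where monitoring/filtering policy would go
	 */
	return 1;
}

char _license[] SEC("license") = "GPL";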

Comments

Andrii Nakryiko July 24, 2024, 11:36 p.m. UTC | #1
On Mon, Jul 22, 2024 at 6:29 PM <technoboy85@gmail.com> wrote:
>
> From: Matteo Croce <teknoraver@meta.com>
>
> The helper bpf_current_task_under_cgroup() is currently only allowed in
> tracing programs.
> Allow its usage also in the BPF_CGROUP_* program types.
> Move the code from kernel/trace/bpf_trace.c to kernel/bpf/helpers.c,
> so it also compiles without CONFIG_BPF_EVENTS.
>
> This will be used in systemd-networkd to monitor sysctl writes and to
> filter its own writes from those of other processes:
> https://github.com/systemd/systemd/pull/32212
>
> Signed-off-by: Matteo Croce <teknoraver@meta.com>
> ---
>  include/linux/bpf.h      |  1 +
>  kernel/bpf/cgroup.c      |  2 ++
>  kernel/bpf/helpers.c     | 23 +++++++++++++++++++++++
>  kernel/trace/bpf_trace.c | 27 ++-------------------------
>  4 files changed, 28 insertions(+), 25 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 4f1d4a97b9d1..4000fd161dda 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -3188,6 +3188,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
>  extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
>  extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
>  extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
> +extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
>  extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
>  extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
>  extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 8ba73042a239..e7113d700b87 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -2581,6 +2581,8 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>         case BPF_FUNC_get_cgroup_classid:
>                 return &bpf_get_cgroup_classid_curr_proto;
>  #endif
> +       case BPF_FUNC_current_task_under_cgroup:
> +               return &bpf_current_task_under_cgroup_proto;
>         default:
>                 return NULL;
>         }
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 23b782641077..eaa3ce14028a 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2457,6 +2457,29 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
>         return ret;
>  }
>
> +BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
> +{
> +       struct bpf_array *array = container_of(map, struct bpf_array, map);
> +       struct cgroup *cgrp;
> +
> +       if (unlikely(idx >= array->map.max_entries))
> +               return -E2BIG;
> +
> +       cgrp = READ_ONCE(array->ptrs[idx]);
> +       if (unlikely(!cgrp))
> +               return -EAGAIN;
> +
> +       return task_under_cgroup_hierarchy(current, cgrp);
> +}
> +
> +const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
> +       .func           = bpf_current_task_under_cgroup,
> +       .gpl_only       = false,
> +       .ret_type       = RET_INTEGER,
> +       .arg1_type      = ARG_CONST_MAP_PTR,
> +       .arg2_type      = ARG_ANYTHING,
> +};
> +
>  /**
>   * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
>   * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index cd098846e251..ea5cdd122024 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -798,29 +798,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
>         .ret_btf_id     = &bpf_task_pt_regs_ids[0],
>  };
>
> -BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
> -{
> -       struct bpf_array *array = container_of(map, struct bpf_array, map);
> -       struct cgroup *cgrp;
> -
> -       if (unlikely(idx >= array->map.max_entries))
> -               return -E2BIG;
> -
> -       cgrp = READ_ONCE(array->ptrs[idx]);
> -       if (unlikely(!cgrp))
> -               return -EAGAIN;
> -
> -       return task_under_cgroup_hierarchy(current, cgrp);
> -}
> -
> -static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
> -       .func           = bpf_current_task_under_cgroup,
> -       .gpl_only       = false,
> -       .ret_type       = RET_INTEGER,
> -       .arg1_type      = ARG_CONST_MAP_PTR,
> -       .arg2_type      = ARG_ANYTHING,
> -};
> -
>  struct send_signal_irq_work {
>         struct irq_work irq_work;
>         struct task_struct *task;
> @@ -1548,8 +1525,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                 return &bpf_get_numa_node_id_proto;
>         case BPF_FUNC_perf_event_read:
>                 return &bpf_perf_event_read_proto;
> -       case BPF_FUNC_current_task_under_cgroup:
> -               return &bpf_current_task_under_cgroup_proto;
>         case BPF_FUNC_get_prandom_u32:
>                 return &bpf_get_prandom_u32_proto;
>         case BPF_FUNC_probe_write_user:
> @@ -1578,6 +1553,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                 return &bpf_cgrp_storage_get_proto;
>         case BPF_FUNC_cgrp_storage_delete:
>                 return &bpf_cgrp_storage_delete_proto;
> +       case BPF_FUNC_current_task_under_cgroup:
> +               return &bpf_current_task_under_cgroup_proto;

Let's not change this part unnecessarily? It clearly works with
!CONFIG_CGROUPS, so why move it? On the other hand, this could,
technically, regress verification of some BPF programs on
!CONFIG_CGROUPS. So I'd drop this part, but the rest looks good.

With that, feel free to add my ack for the next revision:

Acked-by: Andrii Nakryiko <andrii@kernel.org>


pw-bot: cr

>  #endif
>         case BPF_FUNC_send_signal:
>                 return &bpf_send_signal_proto;
> --
> 2.45.2
>
>

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f1d4a97b9d1..4000fd161dda 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3188,6 +3188,7 @@  extern const struct bpf_func_proto bpf_sock_hash_update_proto;
 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
 extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 8ba73042a239..e7113d700b87 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2581,6 +2581,8 @@  cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_cgroup_classid:
 		return &bpf_get_cgroup_classid_curr_proto;
 #endif
+	case BPF_FUNC_current_task_under_cgroup:
+		return &bpf_current_task_under_cgroup_proto;
 	default:
 		return NULL;
 	}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 23b782641077..eaa3ce14028a 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2457,6 +2457,29 @@  __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
 	return ret;
 }
 
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct cgroup *cgrp;
+
+	if (unlikely(idx >= array->map.max_entries))
+		return -E2BIG;
+
+	cgrp = READ_ONCE(array->ptrs[idx]);
+	if (unlikely(!cgrp))
+		return -EAGAIN;
+
+	return task_under_cgroup_hierarchy(current, cgrp);
+}
+
+const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+	.func           = bpf_current_task_under_cgroup,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_CONST_MAP_PTR,
+	.arg2_type      = ARG_ANYTHING,
+};
+
 /**
  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index cd098846e251..ea5cdd122024 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -798,29 +798,6 @@  const struct bpf_func_proto bpf_task_pt_regs_proto = {
 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 };
 
-BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
-{
-	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct cgroup *cgrp;
-
-	if (unlikely(idx >= array->map.max_entries))
-		return -E2BIG;
-
-	cgrp = READ_ONCE(array->ptrs[idx]);
-	if (unlikely(!cgrp))
-		return -EAGAIN;
-
-	return task_under_cgroup_hierarchy(current, cgrp);
-}
-
-static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
-	.func           = bpf_current_task_under_cgroup,
-	.gpl_only       = false,
-	.ret_type       = RET_INTEGER,
-	.arg1_type      = ARG_CONST_MAP_PTR,
-	.arg2_type      = ARG_ANYTHING,
-};
-
 struct send_signal_irq_work {
 	struct irq_work irq_work;
 	struct task_struct *task;
@@ -1548,8 +1525,6 @@  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_numa_node_id_proto;
 	case BPF_FUNC_perf_event_read:
 		return &bpf_perf_event_read_proto;
-	case BPF_FUNC_current_task_under_cgroup:
-		return &bpf_current_task_under_cgroup_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_probe_write_user:
@@ -1578,6 +1553,8 @@  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_cgrp_storage_get_proto;
 	case BPF_FUNC_cgrp_storage_delete:
 		return &bpf_cgrp_storage_delete_proto;
+	case BPF_FUNC_current_task_under_cgroup:
+		return &bpf_current_task_under_cgroup_proto;
 #endif
 	case BPF_FUNC_send_signal:
 		return &bpf_send_signal_proto;
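
For completeness, a hedged sketch of the user-space side that a daemon like
systemd-networkd could pair with the program sketched above: it stores its own
cgroup fd at index 0 of the BPF_MAP_TYPE_CGROUP_ARRAY map and attaches the
sysctl filter to the root cgroup with BPF_CGROUP_SYSCTL. The object file name,
map/program names, and cgroup paths are assumptions for the example:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative libbpf loader sketch, not part of this patch. */
#include <fcntl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int own_cgroup_fd, root_cgroup_fd, map_fd, err;
	__u32 key = 0;

	/* cgroup whose sysctl writes should be recognized as "our own" */
	own_cgroup_fd = open("/sys/fs/cgroup/system.slice/systemd-networkd.service",
			     O_RDONLY);
	/* cgroup to attach to, so that all sysctl writes are observed */
	root_cgroup_fd = open("/sys/fs/cgroup", O_RDONLY);
	if (own_cgroup_fd < 0 || root_cgroup_fd < 0)
		return 1;

	obj = bpf_object__open_file("filter_sysctl.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	/* index 0 of the CGROUP_ARRAY map holds our cgroup fd */
	map_fd = bpf_object__find_map_fd_by_name(obj, "own_cgroup");
	err = bpf_map_update_elem(map_fd, &key, &own_cgroup_fd, BPF_ANY);
	if (err)
		return 1;

	/* legacy attach: the program stays on the cgroup after we exit */
	prog = bpf_object__find_program_by_name(obj, "filter_sysctl");
	err = bpf_prog_attach(bpf_program__fd(prog), root_cgroup_fd,
			      BPF_CGROUP_SYSCTL, 0);
	return err ? 1 : 0;
}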