diff mbox series

[RFC,v3,1/3] bpf: cgroup: Introduce helper cgroup_bpf_current_enabled()

Message ID 20231213143813.6818-2-michael.weiss@aisec.fraunhofer.de (mailing list archive)
State RFC
Delegated to: Paul Moore
Headers show
Series devguard: guard mknod for non-initial user namespace | expand

Commit Message

Michael Weiß Dec. 13, 2023, 2:38 p.m. UTC
This helper can be used to check if a cgroup-bpf specific program is
active for the current task.

Signed-off-by: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
Reviewed-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
---
 include/linux/bpf-cgroup.h |  2 ++
 kernel/bpf/cgroup.c        | 14 ++++++++++++++
 2 files changed, 16 insertions(+)

Comments

Yonghong Song Dec. 13, 2023, 4:59 p.m. UTC | #1
On 12/13/23 6:38 AM, Michael Weiß wrote:
> This helper can be used to check if a cgroup-bpf specific program is
> active for the current task.
>
> Signed-off-by: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
> Reviewed-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
> ---
>   include/linux/bpf-cgroup.h |  2 ++
>   kernel/bpf/cgroup.c        | 14 ++++++++++++++
>   2 files changed, 16 insertions(+)
>
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> index a789266feac3..7cb49bde09ff 100644
> --- a/include/linux/bpf-cgroup.h
> +++ b/include/linux/bpf-cgroup.h
> @@ -191,6 +191,8 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
>   	return array != &bpf_empty_prog_array.hdr;
>   }
>   
> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type);
> +
>   /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
>   #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
>   ({									      \
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 491d20038cbe..9007165abe8c 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -24,6 +24,20 @@
>   DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
>   EXPORT_SYMBOL(cgroup_bpf_enabled_key);
>   
> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type)
> +{
> +	struct cgroup *cgrp;
> +	struct bpf_prog_array *array;
> +
> +	rcu_read_lock();
> +	cgrp = task_dfl_cgroup(current);
> +	rcu_read_unlock();
> +
> +	array = rcu_access_pointer(cgrp->bpf.effective[type]);

This seems wrong here. The cgrp could become invalid once leaving
rcu critical section.

> +	return array != &bpf_empty_prog_array.hdr;

I guess you need include 'array' usage as well in the rcu cs.
So overall should look like:

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	bpf_prog_exists = array != &bpf_empty_prog_array.hdr;
	rcu_read_unlock();

	return bpf_prog_exists;

> +}
> +EXPORT_SYMBOL(cgroup_bpf_current_enabled);
> +
>   /* __always_inline is necessary to prevent indirect call through run_prog
>    * function pointer.
>    */
Michael Weiß Dec. 14, 2023, 8:17 a.m. UTC | #2
On 13.12.23 17:59, Yonghong Song wrote:
> 
> On 12/13/23 6:38 AM, Michael Weiß wrote:
>> This helper can be used to check if a cgroup-bpf specific program is
>> active for the current task.
>>
>> Signed-off-by: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
>> Reviewed-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
>> ---
>>   include/linux/bpf-cgroup.h |  2 ++
>>   kernel/bpf/cgroup.c        | 14 ++++++++++++++
>>   2 files changed, 16 insertions(+)
>>
>> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
>> index a789266feac3..7cb49bde09ff 100644
>> --- a/include/linux/bpf-cgroup.h
>> +++ b/include/linux/bpf-cgroup.h
>> @@ -191,6 +191,8 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
>>   	return array != &bpf_empty_prog_array.hdr;
>>   }
>>   
>> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type);
>> +
>>   /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
>>   #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
>>   ({									      \
>> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
>> index 491d20038cbe..9007165abe8c 100644
>> --- a/kernel/bpf/cgroup.c
>> +++ b/kernel/bpf/cgroup.c
>> @@ -24,6 +24,20 @@
>>   DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
>>   EXPORT_SYMBOL(cgroup_bpf_enabled_key);
>>   
>> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type)
>> +{
>> +	struct cgroup *cgrp;
>> +	struct bpf_prog_array *array;
>> +
>> +	rcu_read_lock();
>> +	cgrp = task_dfl_cgroup(current);
>> +	rcu_read_unlock();
>> +
>> +	array = rcu_access_pointer(cgrp->bpf.effective[type]);
> 
> This seems wrong here. The cgrp could become invalid once leaving
> rcu critical section.

You are right, maybe we were too opportunistic here. We just wanted
to hold the lock for as short a time as possible.

> 
>> +	return array != &bpf_empty_prog_array.hdr;
> 
> I guess you need include 'array' usage as well in the rcu cs.
> So overall should look like:
> 
> 	rcu_read_lock();
> 	cgrp = task_dfl_cgroup(current);
> 	array = rcu_access_pointer(cgrp->bpf.effective[type]);

Looks reasonable, but now that we are in the critical section I would change this to
rcu_dereference() then.

> 	bpf_prog_exists = array != &bpf_empty_prog_array.hdr;
> 	rcu_read_unlock();
> 
> 	return bpf_prog_exists;
> 
>> +}
>> +EXPORT_SYMBOL(cgroup_bpf_current_enabled);
>> +
>>   /* __always_inline is necessary to prevent indirect call through run_prog
>>    * function pointer.
>>    */
Yonghong Song Dec. 15, 2023, 2:31 p.m. UTC | #3
On 12/14/23 12:17 AM, Michael Weiß wrote:
> On 13.12.23 17:59, Yonghong Song wrote:
>> On 12/13/23 6:38 AM, Michael Weiß wrote:
>>> This helper can be used to check if a cgroup-bpf specific program is
>>> active for the current task.
>>>
>>> Signed-off-by: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
>>> Reviewed-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
>>> ---
>>>    include/linux/bpf-cgroup.h |  2 ++
>>>    kernel/bpf/cgroup.c        | 14 ++++++++++++++
>>>    2 files changed, 16 insertions(+)
>>>
>>> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
>>> index a789266feac3..7cb49bde09ff 100644
>>> --- a/include/linux/bpf-cgroup.h
>>> +++ b/include/linux/bpf-cgroup.h
>>> @@ -191,6 +191,8 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
>>>    	return array != &bpf_empty_prog_array.hdr;
>>>    }
>>>    
>>> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type);
>>> +
>>>    /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
>>>    #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
>>>    ({									      \
>>> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
>>> index 491d20038cbe..9007165abe8c 100644
>>> --- a/kernel/bpf/cgroup.c
>>> +++ b/kernel/bpf/cgroup.c
>>> @@ -24,6 +24,20 @@
>>>    DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
>>>    EXPORT_SYMBOL(cgroup_bpf_enabled_key);
>>>    
>>> +bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type)
>>> +{
>>> +	struct cgroup *cgrp;
>>> +	struct bpf_prog_array *array;
>>> +
>>> +	rcu_read_lock();
>>> +	cgrp = task_dfl_cgroup(current);
>>> +	rcu_read_unlock();
>>> +
>>> +	array = rcu_access_pointer(cgrp->bpf.effective[type]);
>> This seems wrong here. The cgrp could become invalid once leaving
>> rcu critical section.
> You are right, maybe we were too opportunistic here. We just wanted
> to hold the lock for as short a time as possible.
>
>>> +	return array != &bpf_empty_prog_array.hdr;
>> I guess you need include 'array' usage as well in the rcu cs.
>> So overall should look like:
>>
>> 	rcu_read_lock();
>> 	cgrp = task_dfl_cgroup(current);
>> 	array = rcu_access_pointer(cgrp->bpf.effective[type]);
> Looks reasonable, but now that we are in the critical section I would change this to
> rcu_dereference() then.

copy-paste error. Right, should use rcu_dereference() indeed.

>
>> 	bpf_prog_exists = array != &bpf_empty_prog_array.hdr;
>> 	rcu_read_unlock();
>>
>> 	return bpf_prog_exists;
>>
>>> +}
>>> +EXPORT_SYMBOL(cgroup_bpf_current_enabled);
>>> +
>>>    /* __always_inline is necessary to prevent indirect call through run_prog
>>>     * function pointer.
>>>     */
diff mbox series

Patch

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a789266feac3..7cb49bde09ff 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -191,6 +191,8 @@  static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
 	return array != &bpf_empty_prog_array.hdr;
 }
 
+bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type);
+
 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
 ({									      \
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 491d20038cbe..9007165abe8c 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -24,6 +24,20 @@ 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 
+/* Return true if a cgroup-bpf program of @type is attached anywhere on
+ * the current task's default-hierarchy cgroup path.  The effective prog
+ * array must be fetched and compared inside the RCU read-side critical
+ * section: the cgroup (and its effective arrays) may be freed as soon as
+ * we leave it.
+ */
+bool cgroup_bpf_current_enabled(enum cgroup_bpf_attach_type type)
+{
+	struct bpf_prog_array *array;
+	bool enabled;
+
+	rcu_read_lock();
+	array = rcu_dereference(task_dfl_cgroup(current)->bpf.effective[type]);
+	enabled = array != &bpf_empty_prog_array.hdr;
+	rcu_read_unlock();
+
+	return enabled;
+}
+EXPORT_SYMBOL(cgroup_bpf_current_enabled);
+
 /* __always_inline is necessary to prevent indirect call through run_prog
  * function pointer.
  */